| repo (string, 1-152 chars, nullable) | file (string, 14-221 chars) | code (string, 501-25k chars) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes) |
---|---|---|---|---|---|---|
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-laneselect-2x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
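// PReLU via relaxed laneselect: each input vector is multiplied by the
// per-channel weights, an arithmetic shift by 31 turns the sign bit into an
// all-ones mask for negative lanes, and laneselect then keeps x*w for
// negative lanes and the original x for non-negative lanes (y = x >= 0 ? x : w*x).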
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_2x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
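// channels, input_stride and output_stride are all byte counts; after a pass
// consumes `channels` bytes from each of the two rows in the tile, these
// increments move the pointers to the start of the next pair of rows.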
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
const v128_t vw89AB = wasm_v128_load(w + 8);
const v128_t vwCDEF = wasm_v128_load(w + 12);
w += 16;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
const v128_t vi0x89AB = wasm_v128_load(i0 + 8);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 12);
i0 += 16;
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1 + 8);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 12);
i1 += 16;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc0x4567 = wasm_f32x4_mul(vi0x4567, vw4567);
const v128_t vmask0x4567 = wasm_i32x4_shr(vi0x4567, 31);
v128_t vacc0x89AB = wasm_f32x4_mul(vi0x89AB, vw89AB);
const v128_t vmask0x89AB = wasm_i32x4_shr(vi0x89AB, 31);
v128_t vacc0xCDEF = wasm_f32x4_mul(vi0xCDEF, vwCDEF);
const v128_t vmask0xCDEF = wasm_i32x4_shr(vi0xCDEF, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc1x4567 = wasm_f32x4_mul(vi1x4567, vw4567);
const v128_t vmask1x4567 = wasm_i32x4_shr(vi1x4567, 31);
v128_t vacc1x89AB = wasm_f32x4_mul(vi1x89AB, vw89AB);
const v128_t vmask1x89AB = wasm_i32x4_shr(vi1x89AB, 31);
v128_t vacc1xCDEF = wasm_f32x4_mul(vi1xCDEF, vwCDEF);
const v128_t vmask1xCDEF = wasm_i32x4_shr(vi1xCDEF, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc0x4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x4567, vi0x4567, vmask0x4567);
vacc0x89AB = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x89AB, vi0x89AB, vmask0x89AB);
vacc0xCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vacc0xCDEF, vi0xCDEF, vmask0xCDEF);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
vacc1x4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x4567, vi1x4567, vmask1x4567);
vacc1x89AB = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x89AB, vi1x89AB, vmask1x89AB);
vacc1xCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vacc1xCDEF, vi1xCDEF, vmask1xCDEF);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
wasm_v128_store(o0 + 8, vacc0x89AB);
wasm_v128_store(o0 + 12, vacc0xCDEF);
o0 += 16;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
wasm_v128_store(o1 + 8, vacc1x89AB);
wasm_v128_store(o1 + 12, vacc1xCDEF);
o1 += 16;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
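// Remainder of 1-3 floats per row: load a full vector (XNN_OOB_READS marks the
// kernel as tolerating the out-of-bounds read) and store only the live lanes
// below, two and/or one at a time.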
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 6,397 | 37.542169 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-laneselect-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_2x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 3,535 | 30.855856 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-laneselect-2x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_2x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc0x4567 = wasm_f32x4_mul(vi0x4567, vw4567);
const v128_t vmask0x4567 = wasm_i32x4_shr(vi0x4567, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc1x4567 = wasm_f32x4_mul(vi1x4567, vw4567);
const v128_t vmask1x4567 = wasm_i32x4_shr(vi1x4567, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc0x4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x4567, vi0x4567, vmask0x4567);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
vacc1x4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x4567, vi1x4567, vmask1x4567);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
o0 += 8;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
o1 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 5,022 | 33.881944 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-laneselect-4x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_4x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
const v128_t vw89AB = wasm_v128_load(w + 8);
const v128_t vwCDEF = wasm_v128_load(w + 12);
w += 16;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
const v128_t vi0x89AB = wasm_v128_load(i0 + 8);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 12);
i0 += 16;
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1 + 8);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 12);
i1 += 16;
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2 + 8);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 12);
i2 += 16;
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
const v128_t vi3x89AB = wasm_v128_load(i3 + 8);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 12);
i3 += 16;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc0x4567 = wasm_f32x4_mul(vi0x4567, vw4567);
const v128_t vmask0x4567 = wasm_i32x4_shr(vi0x4567, 31);
v128_t vacc0x89AB = wasm_f32x4_mul(vi0x89AB, vw89AB);
const v128_t vmask0x89AB = wasm_i32x4_shr(vi0x89AB, 31);
v128_t vacc0xCDEF = wasm_f32x4_mul(vi0xCDEF, vwCDEF);
const v128_t vmask0xCDEF = wasm_i32x4_shr(vi0xCDEF, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc1x4567 = wasm_f32x4_mul(vi1x4567, vw4567);
const v128_t vmask1x4567 = wasm_i32x4_shr(vi1x4567, 31);
v128_t vacc1x89AB = wasm_f32x4_mul(vi1x89AB, vw89AB);
const v128_t vmask1x89AB = wasm_i32x4_shr(vi1x89AB, 31);
v128_t vacc1xCDEF = wasm_f32x4_mul(vi1xCDEF, vwCDEF);
const v128_t vmask1xCDEF = wasm_i32x4_shr(vi1xCDEF, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc2x4567 = wasm_f32x4_mul(vi2x4567, vw4567);
const v128_t vmask2x4567 = wasm_i32x4_shr(vi2x4567, 31);
v128_t vacc2x89AB = wasm_f32x4_mul(vi2x89AB, vw89AB);
const v128_t vmask2x89AB = wasm_i32x4_shr(vi2x89AB, 31);
v128_t vacc2xCDEF = wasm_f32x4_mul(vi2xCDEF, vwCDEF);
const v128_t vmask2xCDEF = wasm_i32x4_shr(vi2xCDEF, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
v128_t vacc3x4567 = wasm_f32x4_mul(vi3x4567, vw4567);
const v128_t vmask3x4567 = wasm_i32x4_shr(vi3x4567, 31);
v128_t vacc3x89AB = wasm_f32x4_mul(vi3x89AB, vw89AB);
const v128_t vmask3x89AB = wasm_i32x4_shr(vi3x89AB, 31);
v128_t vacc3xCDEF = wasm_f32x4_mul(vi3xCDEF, vwCDEF);
const v128_t vmask3xCDEF = wasm_i32x4_shr(vi3xCDEF, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc0x4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x4567, vi0x4567, vmask0x4567);
vacc0x89AB = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x89AB, vi0x89AB, vmask0x89AB);
vacc0xCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vacc0xCDEF, vi0xCDEF, vmask0xCDEF);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
vacc1x4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x4567, vi1x4567, vmask1x4567);
vacc1x89AB = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x89AB, vi1x89AB, vmask1x89AB);
vacc1xCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vacc1xCDEF, vi1xCDEF, vmask1xCDEF);
vacc2x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc2x0123, vi2x0123, vmask2x0123);
vacc2x4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc2x4567, vi2x4567, vmask2x4567);
vacc2x89AB = __builtin_wasm_relaxed_laneselect_i32x4(vacc2x89AB, vi2x89AB, vmask2x89AB);
vacc2xCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vacc2xCDEF, vi2xCDEF, vmask2xCDEF);
vacc3x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc3x0123, vi3x0123, vmask3x0123);
vacc3x4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc3x4567, vi3x4567, vmask3x4567);
vacc3x89AB = __builtin_wasm_relaxed_laneselect_i32x4(vacc3x89AB, vi3x89AB, vmask3x89AB);
vacc3xCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vacc3xCDEF, vi3xCDEF, vmask3xCDEF);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
wasm_v128_store(o0 + 8, vacc0x89AB);
wasm_v128_store(o0 + 12, vacc0xCDEF);
o0 += 16;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
wasm_v128_store(o1 + 8, vacc1x89AB);
wasm_v128_store(o1 + 12, vacc1xCDEF);
o1 += 16;
wasm_v128_store(o2, vacc2x0123);
wasm_v128_store(o2 + 4, vacc2x4567);
wasm_v128_store(o2 + 8, vacc2x89AB);
wasm_v128_store(o2 + 12, vacc2xCDEF);
o2 += 16;
wasm_v128_store(o3, vacc3x0123);
wasm_v128_store(o3 + 4, vacc3x4567);
wasm_v128_store(o3 + 8, vacc3x89AB);
wasm_v128_store(o3 + 12, vacc3xCDEF);
o3 += 16;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
vacc2x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc2x0123, vi2x0123, vmask2x0123);
vacc3x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc3x0123, vi3x0123, vmask3x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
wasm_v128_store(o2, vacc2x0123);
o2 += 4;
wasm_v128_store(o3, vacc3x0123);
o3 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
vacc2x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc2x0123, vi2x0123, vmask2x0123);
vacc3x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc3x0123, vi3x0123, vmask3x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
wasm_v128_store64_lane(o2, vacc2x0123, 0);
wasm_v128_store64_lane(o3, vacc3x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
o0 += 2;
o1 += 2;
o2 += 2;
o3 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
wasm_v128_store32_lane(o2, vacc2x0123, 0);
wasm_v128_store32_lane(o3, vacc3x0123, 0);
o0 += 1;
o1 += 1;
o2 += 1;
o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
| 11,316 | 42.526923 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-laneselect-4x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_4x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
vacc2x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc2x0123, vi2x0123, vmask2x0123);
vacc3x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc3x0123, vi3x0123, vmask3x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
wasm_v128_store(o2, vacc2x0123);
o2 += 4;
wasm_v128_store(o3, vacc3x0123);
o3 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
vacc2x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc2x0123, vi2x0123, vmask2x0123);
vacc3x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc3x0123, vi3x0123, vmask3x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
wasm_v128_store64_lane(o2, vacc2x0123, 0);
wasm_v128_store64_lane(o3, vacc3x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
o0 += 2;
o1 += 2;
o2 += 2;
o3 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
wasm_v128_store32_lane(o2, vacc2x0123, 0);
wasm_v128_store32_lane(o3, vacc3x0123, 0);
o0 += 1;
o1 += 1;
o2 += 1;
o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
| 5,882 | 35.540373 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmrelaxedsimd-laneselect-4x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_4x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc0x4567 = wasm_f32x4_mul(vi0x4567, vw4567);
const v128_t vmask0x4567 = wasm_i32x4_shr(vi0x4567, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc1x4567 = wasm_f32x4_mul(vi1x4567, vw4567);
const v128_t vmask1x4567 = wasm_i32x4_shr(vi1x4567, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc2x4567 = wasm_f32x4_mul(vi2x4567, vw4567);
const v128_t vmask2x4567 = wasm_i32x4_shr(vi2x4567, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
v128_t vacc3x4567 = wasm_f32x4_mul(vi3x4567, vw4567);
const v128_t vmask3x4567 = wasm_i32x4_shr(vi3x4567, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc0x4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x4567, vi0x4567, vmask0x4567);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
vacc1x4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x4567, vi1x4567, vmask1x4567);
vacc2x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc2x0123, vi2x0123, vmask2x0123);
vacc2x4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc2x4567, vi2x4567, vmask2x4567);
vacc3x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc3x0123, vi3x0123, vmask3x0123);
vacc3x4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc3x4567, vi3x4567, vmask3x4567);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
o0 += 8;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
o1 += 8;
wasm_v128_store(o2, vacc2x0123);
wasm_v128_store(o2 + 4, vacc2x4567);
o2 += 8;
wasm_v128_store(o3, vacc3x0123);
wasm_v128_store(o3 + 4, vacc3x4567);
o3 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
vacc2x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc2x0123, vi2x0123, vmask2x0123);
vacc3x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc3x0123, vi3x0123, vmask3x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
wasm_v128_store(o2, vacc2x0123);
o2 += 4;
wasm_v128_store(o3, vacc3x0123);
o3 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
vacc0x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc1x0123, vi1x0123, vmask1x0123);
vacc2x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc2x0123, vi2x0123, vmask2x0123);
vacc3x0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc3x0123, vi3x0123, vmask3x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
wasm_v128_store64_lane(o2, vacc2x0123, 0);
wasm_v128_store64_lane(o3, vacc3x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
o0 += 2;
o1 += 2;
o2 += 2;
o3 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
wasm_v128_store32_lane(o2, vacc2x0123, 0);
wasm_v128_store32_lane(o3, vacc3x0123, 0);
o0 += 1;
o1 += 1;
o2 += 1;
o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
| 8,673 | 38.788991 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-iminmax-1x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
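// PReLU via integer min/max against zero: treating the float bit patterns as
// signed i32, max(x, 0) keeps non-negative lanes and zeroes negative ones,
// while min(x, 0) keeps negative lanes and zeroes the rest; the result is then
// (negative part) * w + (non-negative part), i.e. y = x >= 0 ? x : w*x.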
void xnn_f32_prelu_ukernel__wasmsimd_iminmax_1x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
const float* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
const v128_t vw89AB = wasm_v128_load(w + 8);
const v128_t vwCDEF = wasm_v128_load(w + 12);
w += 16;
v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vi0x4567 = wasm_v128_load(i0 + 4);
v128_t vi0x89AB = wasm_v128_load(i0 + 8);
v128_t vi0xCDEF = wasm_v128_load(i0 + 12);
i0 += 16;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc0x4567 = wasm_i32x4_max(vi0x4567, vzero);
vi0x4567 = wasm_i32x4_min(vi0x4567, vzero);
v128_t vacc0x89AB = wasm_i32x4_max(vi0x89AB, vzero);
vi0x89AB = wasm_i32x4_min(vi0x89AB, vzero);
v128_t vacc0xCDEF = wasm_i32x4_max(vi0xCDEF, vzero);
vi0xCDEF = wasm_i32x4_min(vi0xCDEF, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vw4567), vacc0x4567);
vacc0x89AB = wasm_f32x4_add(wasm_f32x4_mul(vi0x89AB, vw89AB), vacc0x89AB);
vacc0xCDEF = wasm_f32x4_add(wasm_f32x4_mul(vi0xCDEF, vwCDEF), vacc0xCDEF);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
wasm_v128_store(o0 + 8, vacc0x89AB);
wasm_v128_store(o0 + 12, vacc0xCDEF);
o0 += 16;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
o0 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
| 3,771 | 30.433333 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-iminmax-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_iminmax_1x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
const float* w = weights;
size_t c = channels;
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
o0 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
| 2,331 | 25.804598 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-iminmax-1x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_iminmax_1x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
w += 8;
v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc0x4567 = wasm_i32x4_max(vi0x4567, vzero);
vi0x4567 = wasm_i32x4_min(vi0x4567, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vw4567), vacc0x4567);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
o0 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
o0 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
| 3,098 | 27.694444 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-iminmax-2x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_iminmax_2x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
const v128_t vw89AB = wasm_v128_load(w + 8);
const v128_t vwCDEF = wasm_v128_load(w + 12);
w += 16;
v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vi0x4567 = wasm_v128_load(i0 + 4);
v128_t vi0x89AB = wasm_v128_load(i0 + 8);
v128_t vi0xCDEF = wasm_v128_load(i0 + 12);
i0 += 16;
v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vi1x4567 = wasm_v128_load(i1 + 4);
v128_t vi1x89AB = wasm_v128_load(i1 + 8);
v128_t vi1xCDEF = wasm_v128_load(i1 + 12);
i1 += 16;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc0x4567 = wasm_i32x4_max(vi0x4567, vzero);
vi0x4567 = wasm_i32x4_min(vi0x4567, vzero);
v128_t vacc0x89AB = wasm_i32x4_max(vi0x89AB, vzero);
vi0x89AB = wasm_i32x4_min(vi0x89AB, vzero);
v128_t vacc0xCDEF = wasm_i32x4_max(vi0xCDEF, vzero);
vi0xCDEF = wasm_i32x4_min(vi0xCDEF, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc1x4567 = wasm_i32x4_max(vi1x4567, vzero);
vi1x4567 = wasm_i32x4_min(vi1x4567, vzero);
v128_t vacc1x89AB = wasm_i32x4_max(vi1x89AB, vzero);
vi1x89AB = wasm_i32x4_min(vi1x89AB, vzero);
v128_t vacc1xCDEF = wasm_i32x4_max(vi1xCDEF, vzero);
vi1xCDEF = wasm_i32x4_min(vi1xCDEF, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vw4567), vacc0x4567);
vacc0x89AB = wasm_f32x4_add(wasm_f32x4_mul(vi0x89AB, vw89AB), vacc0x89AB);
vacc0xCDEF = wasm_f32x4_add(wasm_f32x4_mul(vi0xCDEF, vwCDEF), vacc0xCDEF);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vw4567), vacc1x4567);
vacc1x89AB = wasm_f32x4_add(wasm_f32x4_mul(vi1x89AB, vw89AB), vacc1x89AB);
vacc1xCDEF = wasm_f32x4_add(wasm_f32x4_mul(vi1xCDEF, vwCDEF), vacc1xCDEF);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
wasm_v128_store(o0 + 8, vacc0x89AB);
wasm_v128_store(o0 + 12, vacc0xCDEF);
o0 += 16;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
wasm_v128_store(o1 + 8, vacc1x89AB);
wasm_v128_store(o1 + 12, vacc1xCDEF);
o1 += 16;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 6,026 | 35.08982 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-iminmax-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_iminmax_2x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 3,436 | 29.6875 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-iminmax-2x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_iminmax_2x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
w += 8;
v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc0x4567 = wasm_i32x4_max(vi0x4567, vzero);
vi0x4567 = wasm_i32x4_min(vi0x4567, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc1x4567 = wasm_i32x4_max(vi1x4567, vzero);
vi1x4567 = wasm_i32x4_min(vi1x4567, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vw4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vw4567), vacc1x4567);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
o0 += 8;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
o1 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
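      // Fewer than 4 channels remain: full vectors are still loaded
      // (XNN_OOB_READS allows reading past the end of the row), and the
      // pointers advance by the exact remaining byte count c.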
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 4,787 | 32.02069 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-iminmax-4x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_iminmax_4x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
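    // If fewer than 4 rows remain, alias the surplus row pointers to the
    // previous row; the duplicated rows recompute and rewrite identical data,
    // keeping all accesses in bounds.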
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
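    // Main loop: 16 channels per iteration across all four rows, followed by
    // a 4-channel loop and a 1-3 channel tail below.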
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
const v128_t vw89AB = wasm_v128_load(w + 8);
const v128_t vwCDEF = wasm_v128_load(w + 12);
w += 16;
v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vi0x4567 = wasm_v128_load(i0 + 4);
v128_t vi0x89AB = wasm_v128_load(i0 + 8);
v128_t vi0xCDEF = wasm_v128_load(i0 + 12);
i0 += 16;
v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vi1x4567 = wasm_v128_load(i1 + 4);
v128_t vi1x89AB = wasm_v128_load(i1 + 8);
v128_t vi1xCDEF = wasm_v128_load(i1 + 12);
i1 += 16;
v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vi2x4567 = wasm_v128_load(i2 + 4);
v128_t vi2x89AB = wasm_v128_load(i2 + 8);
v128_t vi2xCDEF = wasm_v128_load(i2 + 12);
i2 += 16;
v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vi3x4567 = wasm_v128_load(i3 + 4);
v128_t vi3x89AB = wasm_v128_load(i3 + 8);
v128_t vi3xCDEF = wasm_v128_load(i3 + 12);
i3 += 16;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc0x4567 = wasm_i32x4_max(vi0x4567, vzero);
vi0x4567 = wasm_i32x4_min(vi0x4567, vzero);
v128_t vacc0x89AB = wasm_i32x4_max(vi0x89AB, vzero);
vi0x89AB = wasm_i32x4_min(vi0x89AB, vzero);
v128_t vacc0xCDEF = wasm_i32x4_max(vi0xCDEF, vzero);
vi0xCDEF = wasm_i32x4_min(vi0xCDEF, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc1x4567 = wasm_i32x4_max(vi1x4567, vzero);
vi1x4567 = wasm_i32x4_min(vi1x4567, vzero);
v128_t vacc1x89AB = wasm_i32x4_max(vi1x89AB, vzero);
vi1x89AB = wasm_i32x4_min(vi1x89AB, vzero);
v128_t vacc1xCDEF = wasm_i32x4_max(vi1xCDEF, vzero);
vi1xCDEF = wasm_i32x4_min(vi1xCDEF, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc2x4567 = wasm_i32x4_max(vi2x4567, vzero);
vi2x4567 = wasm_i32x4_min(vi2x4567, vzero);
v128_t vacc2x89AB = wasm_i32x4_max(vi2x89AB, vzero);
vi2x89AB = wasm_i32x4_min(vi2x89AB, vzero);
v128_t vacc2xCDEF = wasm_i32x4_max(vi2xCDEF, vzero);
vi2xCDEF = wasm_i32x4_min(vi2xCDEF, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
v128_t vacc3x4567 = wasm_i32x4_max(vi3x4567, vzero);
vi3x4567 = wasm_i32x4_min(vi3x4567, vzero);
v128_t vacc3x89AB = wasm_i32x4_max(vi3x89AB, vzero);
vi3x89AB = wasm_i32x4_min(vi3x89AB, vzero);
v128_t vacc3xCDEF = wasm_i32x4_max(vi3xCDEF, vzero);
vi3xCDEF = wasm_i32x4_min(vi3xCDEF, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vw4567), vacc0x4567);
vacc0x89AB = wasm_f32x4_add(wasm_f32x4_mul(vi0x89AB, vw89AB), vacc0x89AB);
vacc0xCDEF = wasm_f32x4_add(wasm_f32x4_mul(vi0xCDEF, vwCDEF), vacc0xCDEF);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vw4567), vacc1x4567);
vacc1x89AB = wasm_f32x4_add(wasm_f32x4_mul(vi1x89AB, vw89AB), vacc1x89AB);
vacc1xCDEF = wasm_f32x4_add(wasm_f32x4_mul(vi1xCDEF, vwCDEF), vacc1xCDEF);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vw0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vw4567), vacc2x4567);
vacc2x89AB = wasm_f32x4_add(wasm_f32x4_mul(vi2x89AB, vw89AB), vacc2x89AB);
vacc2xCDEF = wasm_f32x4_add(wasm_f32x4_mul(vi2xCDEF, vwCDEF), vacc2xCDEF);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vw0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vw4567), vacc3x4567);
vacc3x89AB = wasm_f32x4_add(wasm_f32x4_mul(vi3x89AB, vw89AB), vacc3x89AB);
vacc3xCDEF = wasm_f32x4_add(wasm_f32x4_mul(vi3xCDEF, vwCDEF), vacc3xCDEF);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
wasm_v128_store(o0 + 8, vacc0x89AB);
wasm_v128_store(o0 + 12, vacc0xCDEF);
o0 += 16;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
wasm_v128_store(o1 + 8, vacc1x89AB);
wasm_v128_store(o1 + 12, vacc1xCDEF);
o1 += 16;
wasm_v128_store(o2, vacc2x0123);
wasm_v128_store(o2 + 4, vacc2x4567);
wasm_v128_store(o2 + 8, vacc2x89AB);
wasm_v128_store(o2 + 12, vacc2xCDEF);
o2 += 16;
wasm_v128_store(o3, vacc3x0123);
wasm_v128_store(o3 + 4, vacc3x4567);
wasm_v128_store(o3 + 8, vacc3x89AB);
wasm_v128_store(o3 + 12, vacc3xCDEF);
o3 += 16;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vw0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vw0123), vacc3x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
wasm_v128_store(o2, vacc2x0123);
o2 += 4;
wasm_v128_store(o3, vacc3x0123);
o3 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vi2x0123 = wasm_v128_load(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
v128_t vi3x0123 = wasm_v128_load(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vw0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vw0123), vacc3x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
wasm_v128_store64_lane(o2, vacc2x0123, 0);
wasm_v128_store64_lane(o3, vacc3x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
o0 += 2;
o1 += 2;
o2 += 2;
o3 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
wasm_v128_store32_lane(o2, vacc2x0123, 0);
wasm_v128_store32_lane(o3, vacc3x0123, 0);
o0 += 1;
o1 += 1;
o2 += 1;
o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
| 10,537 | 39.375479 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-iminmax-4x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_iminmax_4x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
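    // This variant unrolls only to 4 channels per iteration; the tail below
    // handles the last 1-3 channels.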
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vw0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vw0123), vacc3x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
wasm_v128_store(o2, vacc2x0123);
o2 += 4;
wasm_v128_store(o3, vacc3x0123);
o3 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vi2x0123 = wasm_v128_load(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
v128_t vi3x0123 = wasm_v128_load(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vw0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vw0123), vacc3x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
wasm_v128_store64_lane(o2, vacc2x0123, 0);
wasm_v128_store64_lane(o3, vacc3x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
o0 += 2;
o1 += 2;
o2 += 2;
o3 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
wasm_v128_store32_lane(o2, vacc2x0123, 0);
wasm_v128_store32_lane(o3, vacc3x0123, 0);
o0 += 1;
o1 += 1;
o2 += 1;
o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
| 5,647 | 33.864198 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-iminmax-4x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_iminmax_4x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
const v128_t vzero = wasm_i32x4_const_splat(0);
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
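    // 8 channels per iteration for all four rows, then a 4-channel loop and
    // a 1-3 channel tail.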
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
w += 8;
v128_t vi0x0123 = wasm_v128_load(i0);
v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
v128_t vi1x0123 = wasm_v128_load(i1);
v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
v128_t vi2x0123 = wasm_v128_load(i2);
v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
v128_t vi3x0123 = wasm_v128_load(i3);
v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc0x4567 = wasm_i32x4_max(vi0x4567, vzero);
vi0x4567 = wasm_i32x4_min(vi0x4567, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc1x4567 = wasm_i32x4_max(vi1x4567, vzero);
vi1x4567 = wasm_i32x4_min(vi1x4567, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc2x4567 = wasm_i32x4_max(vi2x4567, vzero);
vi2x4567 = wasm_i32x4_min(vi2x4567, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
v128_t vacc3x4567 = wasm_i32x4_max(vi3x4567, vzero);
vi3x4567 = wasm_i32x4_min(vi3x4567, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(vi0x4567, vw4567), vacc0x4567);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(vi1x4567, vw4567), vacc1x4567);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vw0123), vacc2x0123);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(vi2x4567, vw4567), vacc2x4567);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vw0123), vacc3x0123);
vacc3x4567 = wasm_f32x4_add(wasm_f32x4_mul(vi3x4567, vw4567), vacc3x4567);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
o0 += 8;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
o1 += 8;
wasm_v128_store(o2, vacc2x0123);
wasm_v128_store(o2 + 4, vacc2x4567);
o2 += 8;
wasm_v128_store(o3, vacc3x0123);
wasm_v128_store(o3 + 4, vacc3x4567);
o3 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vw0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vw0123), vacc3x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
wasm_v128_store(o2, vacc2x0123);
o2 += 4;
wasm_v128_store(o3, vacc3x0123);
o3 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vi2x0123 = wasm_v128_load(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
v128_t vi3x0123 = wasm_v128_load(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
v128_t vacc0x0123 = wasm_i32x4_max(vi0x0123, vzero);
vi0x0123 = wasm_i32x4_min(vi0x0123, vzero);
v128_t vacc1x0123 = wasm_i32x4_max(vi1x0123, vzero);
vi1x0123 = wasm_i32x4_min(vi1x0123, vzero);
v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero);
vi2x0123 = wasm_i32x4_min(vi2x0123, vzero);
v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);
vi3x0123 = wasm_i32x4_min(vi3x0123, vzero);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi0x0123, vw0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi1x0123, vw0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi2x0123, vw0123), vacc2x0123);
vacc3x0123 = wasm_f32x4_add(wasm_f32x4_mul(vi3x0123, vw0123), vacc3x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
wasm_v128_store64_lane(o2, vacc2x0123, 0);
wasm_v128_store64_lane(o3, vacc3x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
o0 += 2;
o1 += 2;
o2 += 2;
o3 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
wasm_v128_store32_lane(o2, vacc2x0123, 0);
wasm_v128_store32_lane(o3, vacc3x0123, 0);
o0 += 1;
o1 += 1;
o2 += 1;
o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
| 8,166 | 36.292237 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-laneselect-1x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_laneselect_1x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
do {
const float* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
const v128_t vw89AB = wasm_v128_load(w + 8);
const v128_t vwCDEF = wasm_v128_load(w + 12);
w += 16;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
const v128_t vi0x89AB = wasm_v128_load(i0 + 8);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 12);
i0 += 16;
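      // For each group of 4 channels: compute x * slope, broadcast the sign
      // bit into a full-lane mask, then bitselect the product for negative
      // lanes and the original input for non-negative lanes.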
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc0x4567 = wasm_f32x4_mul(vi0x4567, vw4567);
const v128_t vmask0x4567 = wasm_i32x4_shr(vi0x4567, 31);
v128_t vacc0x89AB = wasm_f32x4_mul(vi0x89AB, vw89AB);
const v128_t vmask0x89AB = wasm_i32x4_shr(vi0x89AB, 31);
v128_t vacc0xCDEF = wasm_f32x4_mul(vi0xCDEF, vwCDEF);
const v128_t vmask0xCDEF = wasm_i32x4_shr(vi0xCDEF, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vi0x4567, vmask0x4567);
vacc0x89AB = wasm_v128_bitselect(vacc0x89AB, vi0x89AB, vmask0x89AB);
vacc0xCDEF = wasm_v128_bitselect(vacc0xCDEF, vi0xCDEF, vmask0xCDEF);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
wasm_v128_store(o0 + 8, vacc0x89AB);
wasm_v128_store(o0 + 12, vacc0xCDEF);
o0 += 16;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
o0 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
| 3,811 | 31.033613 | 74 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-laneselect-1x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_laneselect_1x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
do {
const float* w = weights;
size_t c = channels;
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
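      // Arithmetic right shift by 31 broadcasts the sign bit, giving
      // 0xFFFFFFFF for negative lanes and 0 for non-negative ones.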
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
o0 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
| 2,315 | 25.930233 | 74 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-laneselect-1x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_laneselect_1x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const size_t input_increment = input_stride * 1 - channels;
const size_t output_increment = output_stride * 1 - channels;
do {
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc0x4567 = wasm_f32x4_mul(vi0x4567, vw4567);
const v128_t vmask0x4567 = wasm_i32x4_shr(vi0x4567, 31);
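      // bitselect keeps the weighted product where the sign mask is all ones
      // (negative inputs) and passes the input through unchanged elsewhere.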
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vi0x4567, vmask0x4567);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
o0 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
o0 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
o0 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
rows = doz(rows, 1);
} while (rows != 0);
}
| 3,110 | 28.074766 | 74 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-laneselect-2x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_laneselect_2x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
const v128_t vw89AB = wasm_v128_load(w + 8);
const v128_t vwCDEF = wasm_v128_load(w + 12);
w += 16;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
const v128_t vi0x89AB = wasm_v128_load(i0 + 8);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 12);
i0 += 16;
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1 + 8);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 12);
i1 += 16;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc0x4567 = wasm_f32x4_mul(vi0x4567, vw4567);
const v128_t vmask0x4567 = wasm_i32x4_shr(vi0x4567, 31);
v128_t vacc0x89AB = wasm_f32x4_mul(vi0x89AB, vw89AB);
const v128_t vmask0x89AB = wasm_i32x4_shr(vi0x89AB, 31);
v128_t vacc0xCDEF = wasm_f32x4_mul(vi0xCDEF, vwCDEF);
const v128_t vmask0xCDEF = wasm_i32x4_shr(vi0xCDEF, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc1x4567 = wasm_f32x4_mul(vi1x4567, vw4567);
const v128_t vmask1x4567 = wasm_i32x4_shr(vi1x4567, 31);
v128_t vacc1x89AB = wasm_f32x4_mul(vi1x89AB, vw89AB);
const v128_t vmask1x89AB = wasm_i32x4_shr(vi1x89AB, 31);
v128_t vacc1xCDEF = wasm_f32x4_mul(vi1xCDEF, vwCDEF);
const v128_t vmask1xCDEF = wasm_i32x4_shr(vi1xCDEF, 31);
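      // Per-lane select: negative lanes (mask all ones) take the x * w
      // product, non-negative lanes keep the original input value.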
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vi0x4567, vmask0x4567);
vacc0x89AB = wasm_v128_bitselect(vacc0x89AB, vi0x89AB, vmask0x89AB);
vacc0xCDEF = wasm_v128_bitselect(vacc0xCDEF, vi0xCDEF, vmask0xCDEF);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vi1x4567, vmask1x4567);
vacc1x89AB = wasm_v128_bitselect(vacc1x89AB, vi1x89AB, vmask1x89AB);
vacc1xCDEF = wasm_v128_bitselect(vacc1xCDEF, vi1xCDEF, vmask1xCDEF);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
wasm_v128_store(o0 + 8, vacc0x89AB);
wasm_v128_store(o0 + 12, vacc0xCDEF);
o0 += 16;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
wasm_v128_store(o1 + 8, vacc1x89AB);
wasm_v128_store(o1 + 12, vacc1xCDEF);
o1 += 16;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 6,150 | 36.054217 | 74 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-laneselect-2x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_laneselect_2x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
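    // Two rows are processed together, 4 channels at a time; leftover
    // channels use partial-vector lane stores below.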
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 3,448 | 30.072072 | 74 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-laneselect-2x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_laneselect_2x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
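    // 8 channels per iteration for both rows, then a 4-channel loop and a
    // 1-3 channel tail.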
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc0x4567 = wasm_f32x4_mul(vi0x4567, vw4567);
const v128_t vmask0x4567 = wasm_i32x4_shr(vi0x4567, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc1x4567 = wasm_f32x4_mul(vi1x4567, vw4567);
const v128_t vmask1x4567 = wasm_i32x4_shr(vi1x4567, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vi0x4567, vmask0x4567);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vi1x4567, vmask1x4567);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
o0 += 8;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
o1 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 4,855 | 32.722222 | 74 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-laneselect-4x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_laneselect_4x16(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
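    // Largest tile in this family: four rows by 16 channels per iteration,
    // reusing a single set of weight vectors across all four rows.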
for (; c >= 16 * sizeof(float); c -= 16 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
const v128_t vw89AB = wasm_v128_load(w + 8);
const v128_t vwCDEF = wasm_v128_load(w + 12);
w += 16;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
const v128_t vi0x89AB = wasm_v128_load(i0 + 8);
const v128_t vi0xCDEF = wasm_v128_load(i0 + 12);
i0 += 16;
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
const v128_t vi1x89AB = wasm_v128_load(i1 + 8);
const v128_t vi1xCDEF = wasm_v128_load(i1 + 12);
i1 += 16;
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
const v128_t vi2x89AB = wasm_v128_load(i2 + 8);
const v128_t vi2xCDEF = wasm_v128_load(i2 + 12);
i2 += 16;
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
const v128_t vi3x89AB = wasm_v128_load(i3 + 8);
const v128_t vi3xCDEF = wasm_v128_load(i3 + 12);
i3 += 16;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc0x4567 = wasm_f32x4_mul(vi0x4567, vw4567);
const v128_t vmask0x4567 = wasm_i32x4_shr(vi0x4567, 31);
v128_t vacc0x89AB = wasm_f32x4_mul(vi0x89AB, vw89AB);
const v128_t vmask0x89AB = wasm_i32x4_shr(vi0x89AB, 31);
v128_t vacc0xCDEF = wasm_f32x4_mul(vi0xCDEF, vwCDEF);
const v128_t vmask0xCDEF = wasm_i32x4_shr(vi0xCDEF, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc1x4567 = wasm_f32x4_mul(vi1x4567, vw4567);
const v128_t vmask1x4567 = wasm_i32x4_shr(vi1x4567, 31);
v128_t vacc1x89AB = wasm_f32x4_mul(vi1x89AB, vw89AB);
const v128_t vmask1x89AB = wasm_i32x4_shr(vi1x89AB, 31);
v128_t vacc1xCDEF = wasm_f32x4_mul(vi1xCDEF, vwCDEF);
const v128_t vmask1xCDEF = wasm_i32x4_shr(vi1xCDEF, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc2x4567 = wasm_f32x4_mul(vi2x4567, vw4567);
const v128_t vmask2x4567 = wasm_i32x4_shr(vi2x4567, 31);
v128_t vacc2x89AB = wasm_f32x4_mul(vi2x89AB, vw89AB);
const v128_t vmask2x89AB = wasm_i32x4_shr(vi2x89AB, 31);
v128_t vacc2xCDEF = wasm_f32x4_mul(vi2xCDEF, vwCDEF);
const v128_t vmask2xCDEF = wasm_i32x4_shr(vi2xCDEF, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
v128_t vacc3x4567 = wasm_f32x4_mul(vi3x4567, vw4567);
const v128_t vmask3x4567 = wasm_i32x4_shr(vi3x4567, 31);
v128_t vacc3x89AB = wasm_f32x4_mul(vi3x89AB, vw89AB);
const v128_t vmask3x89AB = wasm_i32x4_shr(vi3x89AB, 31);
v128_t vacc3xCDEF = wasm_f32x4_mul(vi3xCDEF, vwCDEF);
const v128_t vmask3xCDEF = wasm_i32x4_shr(vi3xCDEF, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vi0x4567, vmask0x4567);
vacc0x89AB = wasm_v128_bitselect(vacc0x89AB, vi0x89AB, vmask0x89AB);
vacc0xCDEF = wasm_v128_bitselect(vacc0xCDEF, vi0xCDEF, vmask0xCDEF);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vi1x4567, vmask1x4567);
vacc1x89AB = wasm_v128_bitselect(vacc1x89AB, vi1x89AB, vmask1x89AB);
vacc1xCDEF = wasm_v128_bitselect(vacc1xCDEF, vi1xCDEF, vmask1xCDEF);
vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vi2x0123, vmask2x0123);
vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vi2x4567, vmask2x4567);
vacc2x89AB = wasm_v128_bitselect(vacc2x89AB, vi2x89AB, vmask2x89AB);
vacc2xCDEF = wasm_v128_bitselect(vacc2xCDEF, vi2xCDEF, vmask2xCDEF);
vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vi3x0123, vmask3x0123);
vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vi3x4567, vmask3x4567);
vacc3x89AB = wasm_v128_bitselect(vacc3x89AB, vi3x89AB, vmask3x89AB);
vacc3xCDEF = wasm_v128_bitselect(vacc3xCDEF, vi3xCDEF, vmask3xCDEF);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
wasm_v128_store(o0 + 8, vacc0x89AB);
wasm_v128_store(o0 + 12, vacc0xCDEF);
o0 += 16;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
wasm_v128_store(o1 + 8, vacc1x89AB);
wasm_v128_store(o1 + 12, vacc1xCDEF);
o1 += 16;
wasm_v128_store(o2, vacc2x0123);
wasm_v128_store(o2 + 4, vacc2x4567);
wasm_v128_store(o2 + 8, vacc2x89AB);
wasm_v128_store(o2 + 12, vacc2xCDEF);
o2 += 16;
wasm_v128_store(o3, vacc3x0123);
wasm_v128_store(o3 + 4, vacc3x4567);
wasm_v128_store(o3 + 8, vacc3x89AB);
wasm_v128_store(o3 + 12, vacc3xCDEF);
o3 += 16;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vi2x0123, vmask2x0123);
vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vi3x0123, vmask3x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
wasm_v128_store(o2, vacc2x0123);
o2 += 4;
wasm_v128_store(o3, vacc3x0123);
o3 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vi2x0123, vmask2x0123);
vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vi3x0123, vmask3x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
wasm_v128_store64_lane(o2, vacc2x0123, 0);
wasm_v128_store64_lane(o3, vacc3x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
o0 += 2;
o1 += 2;
o2 += 2;
o3 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
wasm_v128_store32_lane(o2, vacc2x0123, 0);
wasm_v128_store32_lane(o3, vacc3x0123, 0);
o0 += 1;
o1 += 1;
o2 += 1;
o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
| 10,829 | 40.653846 | 74 | c |
| XNNPACK | XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-laneselect-4x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_laneselect_4x4(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
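    // Four rows by 4 channels per iteration; the same vw0123 weights are
    // shared by every row in the tile.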
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vi2x0123, vmask2x0123);
vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vi3x0123, vmask3x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
wasm_v128_store(o2, vacc2x0123);
o2 += 4;
wasm_v128_store(o3, vacc3x0123);
o3 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vi2x0123, vmask2x0123);
vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vi3x0123, vmask3x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
wasm_v128_store64_lane(o2, vacc2x0123, 0);
wasm_v128_store64_lane(o3, vacc3x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
o0 += 2;
o1 += 2;
o2 += 2;
o3 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
wasm_v128_store32_lane(o2, vacc2x0123, 0);
wasm_v128_store32_lane(o3, vacc3x0123, 0);
o0 += 1;
o1 += 1;
o2 += 1;
o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
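/*
 * Illustrative sketch (not part of the generated sources): the laneselect
 * PReLU kernels compute y = x when x >= 0 and y = x * w[c] when x < 0,
 * with one weight per channel. The arithmetic right shift by 31 turns the
 * sign bit into an all-ones/all-zeros lane mask, and the select picks the
 * scaled value only for negative lanes. A minimal scalar reference for
 * contiguous rows (strides omitted, names hypothetical):
 */
#include <stddef.h>

static void prelu_reference(size_t rows, size_t channels,
                            const float* input, const float* weights,
                            float* output)
{
  for (size_t m = 0; m < rows; m++) {
    for (size_t c = 0; c < channels; c++) {
      const float x = input[m * channels + c];
      /* Negative inputs are scaled by the per-channel slope. */
      output[m * channels + c] = (x < 0.0f) ? x * weights[c] : x;
    }
  }
}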
XNNPACK-master/src/f32-prelu/gen/f32-prelu-wasmsimd-laneselect-4x8.c
// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f32_prelu_ukernel__wasmsimd_laneselect_4x8(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
float* o2 = (float*) ((uintptr_t) o1 + output_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
float* o3 = (float*) ((uintptr_t) o2 + output_stride);
const size_t input_increment = input_stride * 4 - channels;
const size_t output_increment = output_stride * 4 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = i1;
o2 = o1;
}
if XNN_UNPREDICTABLE(rows < 4) {
i3 = i2;
o3 = o2;
}
const float* w = weights;
size_t c = channels;
for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
const v128_t vw4567 = wasm_v128_load(w + 4);
w += 8;
const v128_t vi0x0123 = wasm_v128_load(i0);
const v128_t vi0x4567 = wasm_v128_load(i0 + 4);
i0 += 8;
const v128_t vi1x0123 = wasm_v128_load(i1);
const v128_t vi1x4567 = wasm_v128_load(i1 + 4);
i1 += 8;
const v128_t vi2x0123 = wasm_v128_load(i2);
const v128_t vi2x4567 = wasm_v128_load(i2 + 4);
i2 += 8;
const v128_t vi3x0123 = wasm_v128_load(i3);
const v128_t vi3x4567 = wasm_v128_load(i3 + 4);
i3 += 8;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc0x4567 = wasm_f32x4_mul(vi0x4567, vw4567);
const v128_t vmask0x4567 = wasm_i32x4_shr(vi0x4567, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc1x4567 = wasm_f32x4_mul(vi1x4567, vw4567);
const v128_t vmask1x4567 = wasm_i32x4_shr(vi1x4567, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc2x4567 = wasm_f32x4_mul(vi2x4567, vw4567);
const v128_t vmask2x4567 = wasm_i32x4_shr(vi2x4567, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
v128_t vacc3x4567 = wasm_f32x4_mul(vi3x4567, vw4567);
const v128_t vmask3x4567 = wasm_i32x4_shr(vi3x4567, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vi0x4567, vmask0x4567);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vi1x4567, vmask1x4567);
vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vi2x0123, vmask2x0123);
vacc2x4567 = wasm_v128_bitselect(vacc2x4567, vi2x4567, vmask2x4567);
vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vi3x0123, vmask3x0123);
vacc3x4567 = wasm_v128_bitselect(vacc3x4567, vi3x4567, vmask3x4567);
wasm_v128_store(o0, vacc0x0123);
wasm_v128_store(o0 + 4, vacc0x4567);
o0 += 8;
wasm_v128_store(o1, vacc1x0123);
wasm_v128_store(o1 + 4, vacc1x4567);
o1 += 8;
wasm_v128_store(o2, vacc2x0123);
wasm_v128_store(o2 + 4, vacc2x4567);
o2 += 8;
wasm_v128_store(o3, vacc3x0123);
wasm_v128_store(o3 + 4, vacc3x4567);
o3 += 8;
}
for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
const v128_t vw0123 = wasm_v128_load(w);
w += 4;
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 += 4;
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vi2x0123, vmask2x0123);
vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vi3x0123, vmask3x0123);
wasm_v128_store(o0, vacc0x0123);
o0 += 4;
wasm_v128_store(o1, vacc1x0123);
o1 += 4;
wasm_v128_store(o2, vacc2x0123);
o2 += 4;
wasm_v128_store(o3, vacc3x0123);
o3 += 4;
}
if XNN_UNLIKELY(c != 0) {
const v128_t vw0123 = wasm_v128_load(w);
w = (const float*) ((uintptr_t) w + c);
const v128_t vi0x0123 = wasm_v128_load(i0);
i0 = (const float*) ((uintptr_t) i0 + c);
const v128_t vi1x0123 = wasm_v128_load(i1);
i1 = (const float*) ((uintptr_t) i1 + c);
const v128_t vi2x0123 = wasm_v128_load(i2);
i2 = (const float*) ((uintptr_t) i2 + c);
const v128_t vi3x0123 = wasm_v128_load(i3);
i3 = (const float*) ((uintptr_t) i3 + c);
v128_t vacc0x0123 = wasm_f32x4_mul(vi0x0123, vw0123);
const v128_t vmask0x0123 = wasm_i32x4_shr(vi0x0123, 31);
v128_t vacc1x0123 = wasm_f32x4_mul(vi1x0123, vw0123);
const v128_t vmask1x0123 = wasm_i32x4_shr(vi1x0123, 31);
v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123);
const v128_t vmask2x0123 = wasm_i32x4_shr(vi2x0123, 31);
v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);
const v128_t vmask3x0123 = wasm_i32x4_shr(vi3x0123, 31);
vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vi0x0123, vmask0x0123);
vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vi1x0123, vmask1x0123);
vacc2x0123 = wasm_v128_bitselect(vacc2x0123, vi2x0123, vmask2x0123);
vacc3x0123 = wasm_v128_bitselect(vacc3x0123, vi3x0123, vmask3x0123);
if (c & (2 * sizeof(float))) {
wasm_v128_store64_lane(o0, vacc0x0123, 0);
wasm_v128_store64_lane(o1, vacc1x0123, 0);
wasm_v128_store64_lane(o2, vacc2x0123, 0);
wasm_v128_store64_lane(o3, vacc3x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc3x0123 = wasm_v64x2_shuffle(vacc3x0123, vacc3x0123, 1, 1);
o0 += 2;
o1 += 2;
o2 += 2;
o3 += 2;
}
if (c & (1 * sizeof(float))) {
wasm_v128_store32_lane(o0, vacc0x0123, 0);
wasm_v128_store32_lane(o1, vacc1x0123, 0);
wasm_v128_store32_lane(o2, vacc2x0123, 0);
wasm_v128_store32_lane(o3, vacc3x0123, 0);
o0 += 1;
o1 += 1;
o2 += 1;
o3 += 1;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
o2 = (float*) ((uintptr_t) o2 + output_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
o3 = (float*) ((uintptr_t) o3 + output_increment);
rows = doz(rows, 4);
} while (rows != 0);
}
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x4-minmax-scalar.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const int32_t vbias = params->scalar.bias[0];
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float va00 = *a0++;
const float va01 = *a0++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const uint8_t vbi2 = ((const uint8_t*) w)[2];
const uint8_t vbi3 = ((const uint8_t*) w)[3];
const float vb00 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb10 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
const float vb20 = (float) ((int32_t) (vbi2 & 0xF) + vbias);
const float vb30 = (float) ((int32_t) (vbi3 & 0xF) + vbias);
const float vb01 = (float) ((int32_t) (vbi0 >> 4) + vbias);
const float vb11 = (float) ((int32_t) (vbi1 >> 4) + vbias);
const float vb21 = (float) ((int32_t) (vbi2 >> 4) + vbias);
const float vb31 = (float) ((int32_t) (vbi3 >> 4) + vbias);
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va00, vb00, vacc00);
vacc01 = math_muladd_f32(va00, vb10, vacc01);
vacc02 = math_muladd_f32(va00, vb20, vacc02);
vacc03 = math_muladd_f32(va00, vb30, vacc03);
vacc00 = math_muladd_f32(va01, vb01, vacc00);
vacc01 = math_muladd_f32(va01, vb11, vacc01);
vacc02 = math_muladd_f32(va01, vb21, vacc02);
vacc03 = math_muladd_f32(va01, vb31, vacc03);
}
if XNN_UNLIKELY(k != 0) {
const float va0 = *a0++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const uint8_t vbi2 = ((const uint8_t*) w)[2];
const uint8_t vbi3 = ((const uint8_t*) w)[3];
const float vb0 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb1 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
const float vb2 = (float) ((int32_t) (vbi2 & 0xF) + vbias);
const float vb3 = (float) ((int32_t) (vbi3 & 0xF) + vbias);
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
}
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc01 *= vscale1;
vacc02 *= vscale2;
vacc03 *= vscale3;
vacc00 = math_max_f32(vacc00, vmin);
vacc01 = math_max_f32(vacc01, vmin);
vacc02 = math_max_f32(vacc02, vmin);
vacc03 = math_max_f32(vacc03, vmin);
vacc00 = math_min_f32(vacc00, vmax);
vacc01 = math_min_f32(vacc01, vmax);
vacc02 = math_min_f32(vacc02, vmax);
vacc03 = math_min_f32(vacc03, vmax);
if XNN_LIKELY(nc >= 4) {
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
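/*
 * Illustrative sketch (not part of the generated sources): in the 4-bit
 * weight stream consumed above, each packed byte carries two weights for
 * the same output column at consecutive k positions -- the low nibble is
 * paired with a[k] and the high nibble with a[k+1]. Both nibbles have the
 * bias from params->scalar.bias[0] added before use, and each column is
 * multiplied by its scale only after all of kc has been accumulated.
 * A hypothetical helper that decodes one packed byte:
 */
#include <stdint.h>

static inline void decode_qc4w_byte(uint8_t packed, int32_t bias,
                                    float* wk, float* wk1)
{
  *wk  = (float) ((int32_t) (packed & 0xF) + bias);  /* weight used with a[k]   */
  *wk1 = (float) ((int32_t) (packed >> 4) + bias);   /* weight used with a[k+1] */
}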
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x4-minmax-wasm.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_1x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const int32_t vbias = params->scalar.bias[0];
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float va00 = *a0++;
const float va01 = *a0++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const uint8_t vbi2 = ((const uint8_t*) w)[2];
const uint8_t vbi3 = ((const uint8_t*) w)[3];
const float vb00 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb10 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
const float vb20 = (float) ((int32_t) (vbi2 & 0xF) + vbias);
const float vb30 = (float) ((int32_t) (vbi3 & 0xF) + vbias);
const float vb01 = (float) ((int32_t) (vbi0 >> 4) + vbias);
const float vb11 = (float) ((int32_t) (vbi1 >> 4) + vbias);
const float vb21 = (float) ((int32_t) (vbi2 >> 4) + vbias);
const float vb31 = (float) ((int32_t) (vbi3 >> 4) + vbias);
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va00, vb00, vacc00);
vacc01 = math_muladd_f32(va00, vb10, vacc01);
vacc02 = math_muladd_f32(va00, vb20, vacc02);
vacc03 = math_muladd_f32(va00, vb30, vacc03);
vacc00 = math_muladd_f32(va01, vb01, vacc00);
vacc01 = math_muladd_f32(va01, vb11, vacc01);
vacc02 = math_muladd_f32(va01, vb21, vacc02);
vacc03 = math_muladd_f32(va01, vb31, vacc03);
}
if XNN_UNLIKELY(k != 0) {
const float va0 = *a0++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const uint8_t vbi2 = ((const uint8_t*) w)[2];
const uint8_t vbi3 = ((const uint8_t*) w)[3];
const float vb0 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb1 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
const float vb2 = (float) ((int32_t) (vbi2 & 0xF) + vbias);
const float vb3 = (float) ((int32_t) (vbi3 & 0xF) + vbias);
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
}
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc01 *= vscale1;
vacc02 *= vscale2;
vacc03 *= vscale3;
vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
if XNN_LIKELY(nc >= 4) {
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-aarch64-neonfma-lane-ld64.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
  const int16x8_t vbias = vld1q_dup_s16(&params->scalar.bias[0]);
const uint8x8_t vmask = vmov_n_u8(UINT8_C(0xF));
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567c0 = vand_u8(vw01234567c01, vmask);
const uint8x8_t vw01234567c1 = vshr_n_u8(vw01234567c01, 4);
const int16x8_t vxw01234567c0 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c0));
const int16x8_t vxw01234567c1 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c1));
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567 = vand_u8(vw01234567c01, vmask);
const int16x8_t vxw01234567 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567));
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
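/*
 * Note added for clarity (not part of the generated file): the NEON
 * variants unpack eight 4-bit weights at a time. vand_u8/vshr_n_u8 split
 * each byte into its low and high nibbles (values 0..15), so reinterpreting
 * them as int8 is lossless, and vaddw_s8 both widens to int16 and adds the
 * bias in one step before the int32 and float conversions.
 */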
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-neon-dup-ld64.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__neon_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
  const int16x8_t vbias = vld1q_dup_s16(&params->scalar.bias[0]);
const uint8x8_t vmask = vmov_n_u8(UINT8_C(0xF));
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567c0 = vand_u8(vw01234567c01, vmask);
const uint8x8_t vw01234567c1 = vshr_n_u8(vw01234567c01, 4);
const int16x8_t vxw01234567c0 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c0));
const int16x8_t vxw01234567c1 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c1));
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567 = vand_u8(vw01234567c01, vmask);
const int16x8_t vxw01234567 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567));
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-neon-lane-ld64.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
  const int16x8_t vbias = vld1q_dup_s16(&params->scalar.bias[0]);
const uint8x8_t vmask = vmov_n_u8(UINT8_C(0xF));
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567c0 = vand_u8(vw01234567c01, vmask);
const uint8x8_t vw01234567c1 = vshr_n_u8(vw01234567c01, 4);
const int16x8_t vxw01234567c0 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c0));
const int16x8_t vxw01234567c1 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c1));
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567 = vand_u8(vw01234567c01, vmask);
const int16x8_t vxw01234567 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567));
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-1x8-minmax-neonfma-dup-ld64.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_1x8__neonfma_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
  const int16x8_t vbias = vld1q_dup_s16(&params->scalar.bias[0]);
const uint8x8_t vmask = vmov_n_u8(UINT8_C(0xF));
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567c0 = vand_u8(vw01234567c01, vmask);
const uint8x8_t vw01234567c1 = vshr_n_u8(vw01234567c01, 4);
const int16x8_t vxw01234567c0 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c0));
const int16x8_t vxw01234567c1 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c1));
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567 = vand_u8(vw01234567c01, vmask);
const int16x8_t vxw01234567 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567));
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
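/*
 * Note added for clarity (not part of the generated files): the *lane-ld64
 * and *dup-ld64 variants differ only in how the activation element is
 * broadcast (a lane-indexed multiply-accumulate versus an explicit
 * vdupq_lane_f32), and the neon/neonfma prefixes select vmlaq_f32 versus
 * the fused vfmaq_f32.
 */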
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-2x4-minmax-scalar.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const int32_t vbias = params->scalar.bias[0];
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float va00 = *a0++;
const float va01 = *a0++;
const float va10 = *a1++;
const float va11 = *a1++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const uint8_t vbi2 = ((const uint8_t*) w)[2];
const uint8_t vbi3 = ((const uint8_t*) w)[3];
const float vb00 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb10 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
const float vb20 = (float) ((int32_t) (vbi2 & 0xF) + vbias);
const float vb30 = (float) ((int32_t) (vbi3 & 0xF) + vbias);
const float vb01 = (float) ((int32_t) (vbi0 >> 4) + vbias);
const float vb11 = (float) ((int32_t) (vbi1 >> 4) + vbias);
const float vb21 = (float) ((int32_t) (vbi2 >> 4) + vbias);
const float vb31 = (float) ((int32_t) (vbi3 >> 4) + vbias);
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va00, vb00, vacc00);
vacc01 = math_muladd_f32(va00, vb10, vacc01);
vacc02 = math_muladd_f32(va00, vb20, vacc02);
vacc03 = math_muladd_f32(va00, vb30, vacc03);
vacc10 = math_muladd_f32(va10, vb00, vacc10);
vacc11 = math_muladd_f32(va10, vb10, vacc11);
vacc12 = math_muladd_f32(va10, vb20, vacc12);
vacc13 = math_muladd_f32(va10, vb30, vacc13);
vacc00 = math_muladd_f32(va01, vb01, vacc00);
vacc01 = math_muladd_f32(va01, vb11, vacc01);
vacc02 = math_muladd_f32(va01, vb21, vacc02);
vacc03 = math_muladd_f32(va01, vb31, vacc03);
vacc10 = math_muladd_f32(va11, vb01, vacc10);
vacc11 = math_muladd_f32(va11, vb11, vacc11);
vacc12 = math_muladd_f32(va11, vb21, vacc12);
vacc13 = math_muladd_f32(va11, vb31, vacc13);
}
if XNN_UNLIKELY(k != 0) {
const float va0 = *a0++;
const float va1 = *a1++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const uint8_t vbi2 = ((const uint8_t*) w)[2];
const uint8_t vbi3 = ((const uint8_t*) w)[3];
const float vb0 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb1 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
const float vb2 = (float) ((int32_t) (vbi2 & 0xF) + vbias);
const float vb3 = (float) ((int32_t) (vbi3 & 0xF) + vbias);
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
}
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc02 *= vscale2;
vacc12 *= vscale2;
vacc03 *= vscale3;
vacc13 *= vscale3;
vacc00 = math_max_f32(vacc00, vmin);
vacc01 = math_max_f32(vacc01, vmin);
vacc02 = math_max_f32(vacc02, vmin);
vacc03 = math_max_f32(vacc03, vmin);
vacc10 = math_max_f32(vacc10, vmin);
vacc11 = math_max_f32(vacc11, vmin);
vacc12 = math_max_f32(vacc12, vmin);
vacc13 = math_max_f32(vacc13, vmin);
vacc00 = math_min_f32(vacc00, vmax);
vacc01 = math_min_f32(vacc01, vmax);
vacc02 = math_min_f32(vacc02, vmax);
vacc03 = math_min_f32(vacc03, vmax);
vacc10 = math_min_f32(vacc10, vmax);
vacc11 = math_min_f32(vacc11, vmax);
vacc12 = math_min_f32(vacc12, vmax);
vacc13 = math_min_f32(vacc13, vmax);
if XNN_LIKELY(nc >= 4) {
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-2x4-minmax-wasm.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_2x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const int32_t vbias = params->scalar.bias[0];
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float va00 = *a0++;
const float va01 = *a0++;
const float va10 = *a1++;
const float va11 = *a1++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const uint8_t vbi2 = ((const uint8_t*) w)[2];
const uint8_t vbi3 = ((const uint8_t*) w)[3];
const float vb00 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb10 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
const float vb20 = (float) ((int32_t) (vbi2 & 0xF) + vbias);
const float vb30 = (float) ((int32_t) (vbi3 & 0xF) + vbias);
const float vb01 = (float) ((int32_t) (vbi0 >> 4) + vbias);
const float vb11 = (float) ((int32_t) (vbi1 >> 4) + vbias);
const float vb21 = (float) ((int32_t) (vbi2 >> 4) + vbias);
const float vb31 = (float) ((int32_t) (vbi3 >> 4) + vbias);
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va00, vb00, vacc00);
vacc01 = math_muladd_f32(va00, vb10, vacc01);
vacc02 = math_muladd_f32(va00, vb20, vacc02);
vacc03 = math_muladd_f32(va00, vb30, vacc03);
vacc10 = math_muladd_f32(va10, vb00, vacc10);
vacc11 = math_muladd_f32(va10, vb10, vacc11);
vacc12 = math_muladd_f32(va10, vb20, vacc12);
vacc13 = math_muladd_f32(va10, vb30, vacc13);
vacc00 = math_muladd_f32(va01, vb01, vacc00);
vacc01 = math_muladd_f32(va01, vb11, vacc01);
vacc02 = math_muladd_f32(va01, vb21, vacc02);
vacc03 = math_muladd_f32(va01, vb31, vacc03);
vacc10 = math_muladd_f32(va11, vb01, vacc10);
vacc11 = math_muladd_f32(va11, vb11, vacc11);
vacc12 = math_muladd_f32(va11, vb21, vacc12);
vacc13 = math_muladd_f32(va11, vb31, vacc13);
}
if XNN_UNLIKELY(k != 0) {
const float va0 = *a0++;
const float va1 = *a1++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const uint8_t vbi2 = ((const uint8_t*) w)[2];
const uint8_t vbi3 = ((const uint8_t*) w)[3];
const float vb0 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb1 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
const float vb2 = (float) ((int32_t) (vbi2 & 0xF) + vbias);
const float vb3 = (float) ((int32_t) (vbi3 & 0xF) + vbias);
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
}
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc02 *= vscale2;
vacc12 *= vscale2;
vacc03 *= vscale3;
vacc13 *= vscale3;
vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
vacc10 = __builtin_wasm_max_f32(vacc10, vmin);
vacc11 = __builtin_wasm_max_f32(vacc11, vmin);
vacc12 = __builtin_wasm_max_f32(vacc12, vmin);
vacc13 = __builtin_wasm_max_f32(vacc13, vmin);
vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
vacc10 = __builtin_wasm_min_f32(vacc10, vmax);
vacc11 = __builtin_wasm_min_f32(vacc11, vmax);
vacc12 = __builtin_wasm_min_f32(vacc12, vmax);
vacc13 = __builtin_wasm_min_f32(vacc13, vmax);
if XNN_LIKELY(nc >= 4) {
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-4x2-minmax-scalar.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_4x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const int32_t vbias = params->scalar.bias[0];
do {
float vacc00 = unaligned_indexed_load_f32(w, 0);
float vacc01 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc30 = vacc00;
float vacc31 = vacc01;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float va00 = *a0++;
const float va01 = *a0++;
const float va10 = *a1++;
const float va11 = *a1++;
const float va20 = *a2++;
const float va21 = *a2++;
const float va30 = *a3++;
const float va31 = *a3++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const float vb00 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb10 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
const float vb01 = (float) ((int32_t) (vbi0 >> 4) + vbias);
const float vb11 = (float) ((int32_t) (vbi1 >> 4) + vbias);
w = (const int8_t*) w + 2;
vacc00 = math_muladd_f32(va00, vb00, vacc00);
vacc01 = math_muladd_f32(va00, vb10, vacc01);
vacc10 = math_muladd_f32(va10, vb00, vacc10);
vacc11 = math_muladd_f32(va10, vb10, vacc11);
vacc20 = math_muladd_f32(va20, vb00, vacc20);
vacc21 = math_muladd_f32(va20, vb10, vacc21);
vacc30 = math_muladd_f32(va30, vb00, vacc30);
vacc31 = math_muladd_f32(va30, vb10, vacc31);
vacc00 = math_muladd_f32(va01, vb01, vacc00);
vacc01 = math_muladd_f32(va01, vb11, vacc01);
vacc10 = math_muladd_f32(va11, vb01, vacc10);
vacc11 = math_muladd_f32(va11, vb11, vacc11);
vacc20 = math_muladd_f32(va21, vb01, vacc20);
vacc21 = math_muladd_f32(va21, vb11, vacc21);
vacc30 = math_muladd_f32(va31, vb01, vacc30);
vacc31 = math_muladd_f32(va31, vb11, vacc31);
}
if XNN_UNLIKELY(k != 0) {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const float vb0 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb1 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
w = (const int8_t*) w + 2;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc20 *= vscale0;
vacc30 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc21 *= vscale1;
vacc31 *= vscale1;
vacc00 = math_max_f32(vacc00, vmin);
vacc01 = math_max_f32(vacc01, vmin);
vacc10 = math_max_f32(vacc10, vmin);
vacc11 = math_max_f32(vacc11, vmin);
vacc20 = math_max_f32(vacc20, vmin);
vacc21 = math_max_f32(vacc21, vmin);
vacc30 = math_max_f32(vacc30, vmin);
vacc31 = math_max_f32(vacc31, vmin);
vacc00 = math_min_f32(vacc00, vmax);
vacc01 = math_min_f32(vacc01, vmax);
vacc10 = math_min_f32(vacc10, vmax);
vacc11 = math_min_f32(vacc11, vmax);
vacc20 = math_min_f32(vacc20, vmax);
vacc21 = math_min_f32(vacc21, vmax);
vacc30 = math_min_f32(vacc30, vmax);
vacc31 = math_min_f32(vacc31, vmax);
if XNN_LIKELY(nc >= 2) {
c3[0] = vacc30;
c3[1] = vacc31;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
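/*
 * Note added for clarity (not part of the generated file): the 2-column
 * variants read the bias and scale through unaligned_indexed_load_f32,
 * presumably because a block of packed 4-bit weight bytes between them need
 * not be a multiple of four bytes, so the float fields in the packed weight
 * stream are not guaranteed to be 4-byte aligned.
 */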
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-4x2-minmax-wasm.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_4x2__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const int32_t vbias = params->scalar.bias[0];
do {
float vacc00 = unaligned_indexed_load_f32(w, 0);
float vacc01 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc30 = vacc00;
float vacc31 = vacc01;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float va00 = *a0++;
const float va01 = *a0++;
const float va10 = *a1++;
const float va11 = *a1++;
const float va20 = *a2++;
const float va21 = *a2++;
const float va30 = *a3++;
const float va31 = *a3++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const float vb00 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb10 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
const float vb01 = (float) ((int32_t) (vbi0 >> 4) + vbias);
const float vb11 = (float) ((int32_t) (vbi1 >> 4) + vbias);
w = (const int8_t*) w + 2;
vacc00 = math_muladd_f32(va00, vb00, vacc00);
vacc01 = math_muladd_f32(va00, vb10, vacc01);
vacc10 = math_muladd_f32(va10, vb00, vacc10);
vacc11 = math_muladd_f32(va10, vb10, vacc11);
vacc20 = math_muladd_f32(va20, vb00, vacc20);
vacc21 = math_muladd_f32(va20, vb10, vacc21);
vacc30 = math_muladd_f32(va30, vb00, vacc30);
vacc31 = math_muladd_f32(va30, vb10, vacc31);
vacc00 = math_muladd_f32(va01, vb01, vacc00);
vacc01 = math_muladd_f32(va01, vb11, vacc01);
vacc10 = math_muladd_f32(va11, vb01, vacc10);
vacc11 = math_muladd_f32(va11, vb11, vacc11);
vacc20 = math_muladd_f32(va21, vb01, vacc20);
vacc21 = math_muladd_f32(va21, vb11, vacc21);
vacc30 = math_muladd_f32(va31, vb01, vacc30);
vacc31 = math_muladd_f32(va31, vb11, vacc31);
}
if XNN_UNLIKELY(k != 0) {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const float vb0 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb1 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
w = (const int8_t*) w + 2;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc20 *= vscale0;
vacc30 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc21 *= vscale1;
vacc31 *= vscale1;
vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
vacc10 = __builtin_wasm_max_f32(vacc10, vmin);
vacc11 = __builtin_wasm_max_f32(vacc11, vmin);
vacc20 = __builtin_wasm_max_f32(vacc20, vmin);
vacc21 = __builtin_wasm_max_f32(vacc21, vmin);
vacc30 = __builtin_wasm_max_f32(vacc30, vmin);
vacc31 = __builtin_wasm_max_f32(vacc31, vmin);
vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
vacc10 = __builtin_wasm_min_f32(vacc10, vmax);
vacc11 = __builtin_wasm_min_f32(vacc11, vmax);
vacc20 = __builtin_wasm_min_f32(vacc20, vmax);
vacc21 = __builtin_wasm_min_f32(vacc21, vmax);
vacc30 = __builtin_wasm_min_f32(vacc30, vmax);
vacc31 = __builtin_wasm_min_f32(vacc31, vmax);
if XNN_LIKELY(nc >= 2) {
c3[0] = vacc30;
c3[1] = vacc31;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-4x4-minmax-scalar.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const int32_t vbias = params->scalar.bias[0];
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float va00 = *a0++;
const float va01 = *a0++;
const float va10 = *a1++;
const float va11 = *a1++;
const float va20 = *a2++;
const float va21 = *a2++;
const float va30 = *a3++;
const float va31 = *a3++;
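      // Each weight byte packs two 4-bit values for one output channel: the low nibble pairs with
      // the even K element (va*0), the high nibble with the odd K element (va*1); the signed bias
      // from params recenters the unsigned nibbles before conversion to float.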
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const uint8_t vbi2 = ((const uint8_t*) w)[2];
const uint8_t vbi3 = ((const uint8_t*) w)[3];
const float vb00 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb10 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
const float vb20 = (float) ((int32_t) (vbi2 & 0xF) + vbias);
const float vb30 = (float) ((int32_t) (vbi3 & 0xF) + vbias);
const float vb01 = (float) ((int32_t) (vbi0 >> 4) + vbias);
const float vb11 = (float) ((int32_t) (vbi1 >> 4) + vbias);
const float vb21 = (float) ((int32_t) (vbi2 >> 4) + vbias);
const float vb31 = (float) ((int32_t) (vbi3 >> 4) + vbias);
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va00, vb00, vacc00);
vacc01 = math_muladd_f32(va00, vb10, vacc01);
vacc02 = math_muladd_f32(va00, vb20, vacc02);
vacc03 = math_muladd_f32(va00, vb30, vacc03);
vacc10 = math_muladd_f32(va10, vb00, vacc10);
vacc11 = math_muladd_f32(va10, vb10, vacc11);
vacc12 = math_muladd_f32(va10, vb20, vacc12);
vacc13 = math_muladd_f32(va10, vb30, vacc13);
vacc20 = math_muladd_f32(va20, vb00, vacc20);
vacc21 = math_muladd_f32(va20, vb10, vacc21);
vacc22 = math_muladd_f32(va20, vb20, vacc22);
vacc23 = math_muladd_f32(va20, vb30, vacc23);
vacc30 = math_muladd_f32(va30, vb00, vacc30);
vacc31 = math_muladd_f32(va30, vb10, vacc31);
vacc32 = math_muladd_f32(va30, vb20, vacc32);
vacc33 = math_muladd_f32(va30, vb30, vacc33);
vacc00 = math_muladd_f32(va01, vb01, vacc00);
vacc01 = math_muladd_f32(va01, vb11, vacc01);
vacc02 = math_muladd_f32(va01, vb21, vacc02);
vacc03 = math_muladd_f32(va01, vb31, vacc03);
vacc10 = math_muladd_f32(va11, vb01, vacc10);
vacc11 = math_muladd_f32(va11, vb11, vacc11);
vacc12 = math_muladd_f32(va11, vb21, vacc12);
vacc13 = math_muladd_f32(va11, vb31, vacc13);
vacc20 = math_muladd_f32(va21, vb01, vacc20);
vacc21 = math_muladd_f32(va21, vb11, vacc21);
vacc22 = math_muladd_f32(va21, vb21, vacc22);
vacc23 = math_muladd_f32(va21, vb31, vacc23);
vacc30 = math_muladd_f32(va31, vb01, vacc30);
vacc31 = math_muladd_f32(va31, vb11, vacc31);
vacc32 = math_muladd_f32(va31, vb21, vacc32);
vacc33 = math_muladd_f32(va31, vb31, vacc33);
}
if XNN_UNLIKELY(k != 0) {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const uint8_t vbi2 = ((const uint8_t*) w)[2];
const uint8_t vbi3 = ((const uint8_t*) w)[3];
const float vb0 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb1 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
const float vb2 = (float) ((int32_t) (vbi2 & 0xF) + vbias);
const float vb3 = (float) ((int32_t) (vbi3 & 0xF) + vbias);
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
}
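    // Epilogue: apply the per-output-channel scales stored after the packed weights, then clamp to [min, max].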
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc20 *= vscale0;
vacc30 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc21 *= vscale1;
vacc31 *= vscale1;
vacc02 *= vscale2;
vacc12 *= vscale2;
vacc22 *= vscale2;
vacc32 *= vscale2;
vacc03 *= vscale3;
vacc13 *= vscale3;
vacc23 *= vscale3;
vacc33 *= vscale3;
vacc00 = math_max_f32(vacc00, vmin);
vacc01 = math_max_f32(vacc01, vmin);
vacc02 = math_max_f32(vacc02, vmin);
vacc03 = math_max_f32(vacc03, vmin);
vacc10 = math_max_f32(vacc10, vmin);
vacc11 = math_max_f32(vacc11, vmin);
vacc12 = math_max_f32(vacc12, vmin);
vacc13 = math_max_f32(vacc13, vmin);
vacc20 = math_max_f32(vacc20, vmin);
vacc21 = math_max_f32(vacc21, vmin);
vacc22 = math_max_f32(vacc22, vmin);
vacc23 = math_max_f32(vacc23, vmin);
vacc30 = math_max_f32(vacc30, vmin);
vacc31 = math_max_f32(vacc31, vmin);
vacc32 = math_max_f32(vacc32, vmin);
vacc33 = math_max_f32(vacc33, vmin);
vacc00 = math_min_f32(vacc00, vmax);
vacc01 = math_min_f32(vacc01, vmax);
vacc02 = math_min_f32(vacc02, vmax);
vacc03 = math_min_f32(vacc03, vmax);
vacc10 = math_min_f32(vacc10, vmax);
vacc11 = math_min_f32(vacc11, vmax);
vacc12 = math_min_f32(vacc12, vmax);
vacc13 = math_min_f32(vacc13, vmax);
vacc20 = math_min_f32(vacc20, vmax);
vacc21 = math_min_f32(vacc21, vmax);
vacc22 = math_min_f32(vacc22, vmax);
vacc23 = math_min_f32(vacc23, vmax);
vacc30 = math_min_f32(vacc30, vmax);
vacc31 = math_min_f32(vacc31, vmax);
vacc32 = math_min_f32(vacc32, vmax);
vacc33 = math_min_f32(vacc33, vmax);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 9,598 | 32.680702 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-4x4-minmax-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_4x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
const int32_t vbias = params->scalar.bias[0];
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float va00 = *a0++;
const float va01 = *a0++;
const float va10 = *a1++;
const float va11 = *a1++;
const float va20 = *a2++;
const float va21 = *a2++;
const float va30 = *a3++;
const float va31 = *a3++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const uint8_t vbi2 = ((const uint8_t*) w)[2];
const uint8_t vbi3 = ((const uint8_t*) w)[3];
const float vb00 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb10 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
const float vb20 = (float) ((int32_t) (vbi2 & 0xF) + vbias);
const float vb30 = (float) ((int32_t) (vbi3 & 0xF) + vbias);
const float vb01 = (float) ((int32_t) (vbi0 >> 4) + vbias);
const float vb11 = (float) ((int32_t) (vbi1 >> 4) + vbias);
const float vb21 = (float) ((int32_t) (vbi2 >> 4) + vbias);
const float vb31 = (float) ((int32_t) (vbi3 >> 4) + vbias);
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va00, vb00, vacc00);
vacc01 = math_muladd_f32(va00, vb10, vacc01);
vacc02 = math_muladd_f32(va00, vb20, vacc02);
vacc03 = math_muladd_f32(va00, vb30, vacc03);
vacc10 = math_muladd_f32(va10, vb00, vacc10);
vacc11 = math_muladd_f32(va10, vb10, vacc11);
vacc12 = math_muladd_f32(va10, vb20, vacc12);
vacc13 = math_muladd_f32(va10, vb30, vacc13);
vacc20 = math_muladd_f32(va20, vb00, vacc20);
vacc21 = math_muladd_f32(va20, vb10, vacc21);
vacc22 = math_muladd_f32(va20, vb20, vacc22);
vacc23 = math_muladd_f32(va20, vb30, vacc23);
vacc30 = math_muladd_f32(va30, vb00, vacc30);
vacc31 = math_muladd_f32(va30, vb10, vacc31);
vacc32 = math_muladd_f32(va30, vb20, vacc32);
vacc33 = math_muladd_f32(va30, vb30, vacc33);
vacc00 = math_muladd_f32(va01, vb01, vacc00);
vacc01 = math_muladd_f32(va01, vb11, vacc01);
vacc02 = math_muladd_f32(va01, vb21, vacc02);
vacc03 = math_muladd_f32(va01, vb31, vacc03);
vacc10 = math_muladd_f32(va11, vb01, vacc10);
vacc11 = math_muladd_f32(va11, vb11, vacc11);
vacc12 = math_muladd_f32(va11, vb21, vacc12);
vacc13 = math_muladd_f32(va11, vb31, vacc13);
vacc20 = math_muladd_f32(va21, vb01, vacc20);
vacc21 = math_muladd_f32(va21, vb11, vacc21);
vacc22 = math_muladd_f32(va21, vb21, vacc22);
vacc23 = math_muladd_f32(va21, vb31, vacc23);
vacc30 = math_muladd_f32(va31, vb01, vacc30);
vacc31 = math_muladd_f32(va31, vb11, vacc31);
vacc32 = math_muladd_f32(va31, vb21, vacc32);
vacc33 = math_muladd_f32(va31, vb31, vacc33);
}
if XNN_UNLIKELY(k != 0) {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const uint8_t vbi0 = ((const uint8_t*) w)[0];
const uint8_t vbi1 = ((const uint8_t*) w)[1];
const uint8_t vbi2 = ((const uint8_t*) w)[2];
const uint8_t vbi3 = ((const uint8_t*) w)[3];
const float vb0 = (float) ((int32_t) (vbi0 & 0xF) + vbias);
const float vb1 = (float) ((int32_t) (vbi1 & 0xF) + vbias);
const float vb2 = (float) ((int32_t) (vbi2 & 0xF) + vbias);
const float vb3 = (float) ((int32_t) (vbi3 & 0xF) + vbias);
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
}
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc20 *= vscale0;
vacc30 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc21 *= vscale1;
vacc31 *= vscale1;
vacc02 *= vscale2;
vacc12 *= vscale2;
vacc22 *= vscale2;
vacc32 *= vscale2;
vacc03 *= vscale3;
vacc13 *= vscale3;
vacc23 *= vscale3;
vacc33 *= vscale3;
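    // Clamp with the WAsm min/max builtins, which lower to the f32.min/f32.max instructions.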
vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
vacc10 = __builtin_wasm_max_f32(vacc10, vmin);
vacc11 = __builtin_wasm_max_f32(vacc11, vmin);
vacc12 = __builtin_wasm_max_f32(vacc12, vmin);
vacc13 = __builtin_wasm_max_f32(vacc13, vmin);
vacc20 = __builtin_wasm_max_f32(vacc20, vmin);
vacc21 = __builtin_wasm_max_f32(vacc21, vmin);
vacc22 = __builtin_wasm_max_f32(vacc22, vmin);
vacc23 = __builtin_wasm_max_f32(vacc23, vmin);
vacc30 = __builtin_wasm_max_f32(vacc30, vmin);
vacc31 = __builtin_wasm_max_f32(vacc31, vmin);
vacc32 = __builtin_wasm_max_f32(vacc32, vmin);
vacc33 = __builtin_wasm_max_f32(vacc33, vmin);
vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
vacc10 = __builtin_wasm_min_f32(vacc10, vmax);
vacc11 = __builtin_wasm_min_f32(vacc11, vmax);
vacc12 = __builtin_wasm_min_f32(vacc12, vmax);
vacc13 = __builtin_wasm_min_f32(vacc13, vmax);
vacc20 = __builtin_wasm_min_f32(vacc20, vmax);
vacc21 = __builtin_wasm_min_f32(vacc21, vmax);
vacc22 = __builtin_wasm_min_f32(vacc22, vmax);
vacc23 = __builtin_wasm_min_f32(vacc23, vmax);
vacc30 = __builtin_wasm_min_f32(vacc30, vmax);
vacc31 = __builtin_wasm_min_f32(vacc31, vmax);
vacc32 = __builtin_wasm_min_f32(vacc32, vmax);
vacc33 = __builtin_wasm_min_f32(vacc33, vmax);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 9,916 | 33.796491 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-4x8-minmax-aarch64-neonfma-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_4x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
  const int16x8_t vbias = vld1q_dup_s16(&params->scalar.bias[0]);
const uint8x8_t vmask = vmov_n_u8(UINT8_C(0xF));
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
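      // Load 8 packed weight bytes (8 output channels x 2 K steps): mask the low nibbles for step c0,
      // shift down the high nibbles for step c1, widen to int16 while adding the bias, then to int32
      // and convert to float.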
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567c0 = vand_u8(vw01234567c01, vmask);
const uint8x8_t vw01234567c1 = vshr_n_u8(vw01234567c01, 4);
const int16x8_t vxw01234567c0 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c0));
const int16x8_t vxw01234567c1 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c1));
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567 = vand_u8(vw01234567c01, vmask);
const int16x8_t vxw01234567 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567));
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
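      // Partial-tile store: write 4, then 2, then 1 remaining columns, shifting the surviving lanes down after each step.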
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,761 | 38.468468 | 88 | c |
| XNNPACK | XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-4x8-minmax-neon-dup-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_4x8__neon_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
  const int16x8_t vbias = vld1q_dup_s16(&params->scalar.bias[0]);
const uint8x8_t vmask = vmov_n_u8(UINT8_C(0xF));
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567c0 = vand_u8(vw01234567c01, vmask);
const uint8x8_t vw01234567c1 = vshr_n_u8(vw01234567c01, 4);
const int16x8_t vxw01234567c0 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c0));
const int16x8_t vxw01234567c1 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c1));
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
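      // "dup" variant: broadcast each A lane to a full vector and use plain vmlaq_f32 instead of the lane-indexed multiply-accumulate.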
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567 = vand_u8(vw01234567c01, vmask);
const int16x8_t vxw01234567 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567));
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,101 | 38.573913 | 88 | c |
| XNNPACK | XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-4x8-minmax-neon-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_4x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
  const int16x8_t vbias = vld1q_dup_s16(&params->scalar.bias[0]);
const uint8x8_t vmask = vmov_n_u8(UINT8_C(0xF));
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567c0 = vand_u8(vw01234567c01, vmask);
const uint8x8_t vw01234567c1 = vshr_n_u8(vw01234567c01, 4);
const int16x8_t vxw01234567c0 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c0));
const int16x8_t vxw01234567c1 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c1));
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
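      // "lane" variant for plain NEON: vmla against a lane of the 2-element A vector, with no FMA requirement.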
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567 = vand_u8(vw01234567c01, vmask);
const int16x8_t vxw01234567 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567));
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,750 | 38.418919 | 88 | c |
| XNNPACK | XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-4x8-minmax-neonfma-dup-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_4x8__neonfma_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
  const int16x8_t vbias = vld1q_dup_s16(&params->scalar.bias[0]);
const uint8x8_t vmask = vmov_n_u8(UINT8_C(0xF));
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567c0 = vand_u8(vw01234567c01, vmask);
const uint8x8_t vw01234567c1 = vshr_n_u8(vw01234567c01, 4);
const int16x8_t vxw01234567c0 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c0));
const int16x8_t vxw01234567c1 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c1));
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567 = vand_u8(vw01234567c01, vmask);
const int16x8_t vxw01234567 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567));
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,104 | 38.586957 | 88 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-5x8-minmax-aarch64-neonfma-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_5x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
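  // 5-row tile: a fifth A/C row pair (a4/c4) joins the 4x8 scheme; the 4-bit unpack and epilogue are otherwise unchanged.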
  const int16x8_t vbias = vld1q_dup_s16(&params->scalar.bias[0]);
const uint8x8_t vmask = vmov_n_u8(UINT8_C(0xF));
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567c0 = vand_u8(vw01234567c01, vmask);
const uint8x8_t vw01234567c1 = vshr_n_u8(vw01234567c01, 4);
const int16x8_t vxw01234567c0 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c0));
const int16x8_t vxw01234567c1 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c1));
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567 = vand_u8(vw01234567c01, vmask);
const int16x8_t vxw01234567 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567));
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc4x0123 = vmulq_f32(vacc4x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
vacc4x4567 = vmulq_f32(vacc4x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,220 | 39.240157 | 88 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-5x8-minmax-neon-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_5x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
  const int16x8_t vbias = vld1q_dup_s16(&params->scalar.bias[0]);
const uint8x8_t vmask = vmov_n_u8(UINT8_C(0xF));
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567c0 = vand_u8(vw01234567c01, vmask);
const uint8x8_t vw01234567c1 = vshr_n_u8(vw01234567c01, 4);
const int16x8_t vxw01234567c0 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c0));
const int16x8_t vxw01234567c1 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c1));
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
}
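    // If kc is odd, process the final K element; only the low nibbles of the packed weight bytes are used.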
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567 = vand_u8(vw01234567c01, vmask);
const int16x8_t vxw01234567 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567));
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
}
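    // Multiply the accumulators by the per-output-channel scales that follow the packed weights.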
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc4x0123 = vmulq_f32(vacc4x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
vacc4x4567 = vmulq_f32(vacc4x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
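    // Store a full 8-column tile when nc >= 8; otherwise write the 4/2/1-column remainder below.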
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,209 | 39.19685 | 88 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-6x8-minmax-aarch64-neonfma-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_6x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
  const int16x8_t vbias = vld1q_dup_s16(&params->scalar.bias[0]);
const uint8x8_t vmask = vmov_n_u8(UINT8_C(0xF));
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567c0 = vand_u8(vw01234567c01, vmask);
const uint8x8_t vw01234567c1 = vshr_n_u8(vw01234567c01, 4);
const int16x8_t vxw01234567c0 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c0));
const int16x8_t vxw01234567c1 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c1));
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c0, va5, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c0, va5, 0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c1, va5, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c1, va5, 1);
}
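    // If kc is odd, process the final K element; only the low nibbles of the packed weight bytes are used.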
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567 = vand_u8(vw01234567c01, vmask);
const int16x8_t vxw01234567 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567));
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
}
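    // Multiply the accumulators by the per-output-channel scales that follow the packed weights.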
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc4x0123 = vmulq_f32(vacc4x0123, vscale0123);
vacc5x0123 = vmulq_f32(vacc5x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
vacc4x4567 = vmulq_f32(vacc4x4567, vscale4567);
vacc5x4567 = vmulq_f32(vacc5x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
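    // Store a full 8-column tile when nc >= 8; otherwise write the 4/2/1-column remainder below.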
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,680 | 39.842657 | 88 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-6x8-minmax-neon-dup-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_6x8__neon_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
  const int16x8_t vbias = vld1q_dup_s16(&params->scalar.bias[0]);
const uint8x8_t vmask = vmov_n_u8(UINT8_C(0xF));
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567c0 = vand_u8(vw01234567c01, vmask);
const uint8x8_t vw01234567c1 = vshr_n_u8(vw01234567c01, 4);
const int16x8_t vxw01234567c0 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c0));
const int16x8_t vxw01234567c1 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c1));
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);
const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c0, vb0123c0);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c0, vb4567c0);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
const float32x4_t va4c1 = vdupq_lane_f32(va4, 1);
const float32x4_t va5c1 = vdupq_lane_f32(va5, 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c1, vb0123c1);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c1, vb4567c1);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c1, vb4567c1);
}
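    // If kc is odd, process the final K element; only the low nibbles of the packed weight bytes are used.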
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567 = vand_u8(vw01234567c01, vmask);
const int16x8_t vxw01234567 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567));
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
}
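    // Multiply the accumulators by the per-output-channel scales that follow the packed weights.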
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc4x0123 = vmulq_f32(vacc4x0123, vscale0123);
vacc5x0123 = vmulq_f32(vacc5x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
vacc4x4567 = vmulq_f32(vacc4x4567, vscale4567);
vacc5x4567 = vmulq_f32(vacc5x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
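    // Store a full 8-column tile when nc >= 8; otherwise write the 4/2/1-column remainder below.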
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,196 | 39.92953 | 88 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-6x8-minmax-neon-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_6x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
  const int16x8_t vbias = vld1q_dup_s16(&params->scalar.bias[0]);
const uint8x8_t vmask = vmov_n_u8(UINT8_C(0xF));
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567c0 = vand_u8(vw01234567c01, vmask);
const uint8x8_t vw01234567c1 = vshr_n_u8(vw01234567c01, 4);
const int16x8_t vxw01234567c0 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c0));
const int16x8_t vxw01234567c1 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c1));
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c0, va5, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c0, va5, 0);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c1, va5, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c1, va5, 1);
}
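    // If kc is odd, process the final K element; only the low nibbles of the packed weight bytes are used.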
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567 = vand_u8(vw01234567c01, vmask);
const int16x8_t vxw01234567 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567));
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
}
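    // Multiply the accumulators by the per-output-channel scales that follow the packed weights.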
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc4x0123 = vmulq_f32(vacc4x0123, vscale0123);
vacc5x0123 = vmulq_f32(vacc5x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
vacc4x4567 = vmulq_f32(vacc4x4567, vscale4567);
vacc5x4567 = vmulq_f32(vacc5x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
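    // Store a full 8-column tile when nc >= 8; otherwise write the 4/2/1-column remainder below.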
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,669 | 39.804196 | 88 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc4w-gemm/gen/f32-qc4w-gemm-6x8-minmax-neonfma-dup-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc4w_gemm_minmax_ukernel_6x8__neonfma_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_qc4w_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
  const int16x8_t vbias = vld1q_dup_s16(&params->scalar.bias[0]);
const uint8x8_t vmask = vmov_n_u8(UINT8_C(0xF));
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567c0 = vand_u8(vw01234567c01, vmask);
const uint8x8_t vw01234567c1 = vshr_n_u8(vw01234567c01, 4);
const int16x8_t vxw01234567c0 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c0));
const int16x8_t vxw01234567c1 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567c1));
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);
const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c0, vb0123c0);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c0, vb4567c0);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
const float32x4_t va4c1 = vdupq_lane_f32(va4, 1);
const float32x4_t va5c1 = vdupq_lane_f32(va5, 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c1, vb0123c1);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c1, vb4567c1);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c1, vb4567c1);
}
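    // If kc is odd, process the final K element; only the low nibbles of the packed weight bytes are used.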
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const uint8x8_t vw01234567c01 = vld1_u8(w); w = (const uint8_t*) w + 8;
const uint8x8_t vw01234567 = vand_u8(vw01234567c01, vmask);
const int16x8_t vxw01234567 = vaddw_s8(vbias, vreinterpret_s8_u8(vw01234567));
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
}
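    // Multiply the accumulators by the per-output-channel scales that follow the packed weights.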
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc4x0123 = vmulq_f32(vacc4x0123, vscale0123);
vacc5x0123 = vmulq_f32(vacc5x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
vacc4x4567 = vmulq_f32(vacc4x4567, vscale4567);
vacc5x4567 = vmulq_f32(vacc5x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
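    // Store a full 8-column tile when nc >= 8; otherwise write the 4/2/1-column remainder below.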
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,199 | 39.939597 | 88 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x16-minmax-aarch64-neonfma-lane-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x16__aarch64_neonfma_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x89AB = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0xCDEF = vld1q_f32(w); w = (const float*) w + 4;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw89ABCDEFc0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw89ABCDEFc0 = vmovl_s8(vw89ABCDEFc0);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw89ABc0 = vmovl_s16(vget_low_s16(vxw89ABCDEFc0));
const int32x4_t vxwCDEFc0 = vmovl_s16(vget_high_s16(vxw89ABCDEFc0));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb89ABc0 = vcvtq_f32_s32(vxw89ABc0);
const float32x4_t vbCDEFc0 = vcvtq_f32_s32(vxwCDEFc0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc0, vget_low_f32(va0), 0);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc0, vget_low_f32(va0), 0);
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw89ABCDEFc1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int16x8_t vxw89ABCDEFc1 = vmovl_s8(vw89ABCDEFc1);
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const int32x4_t vxw89ABc1 = vmovl_s16(vget_low_s16(vxw89ABCDEFc1));
const int32x4_t vxwCDEFc1 = vmovl_s16(vget_high_s16(vxw89ABCDEFc1));
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
const float32x4_t vb89ABc1 = vcvtq_f32_s32(vxw89ABc1);
const float32x4_t vbCDEFc1 = vcvtq_f32_s32(vxwCDEFc1);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc1, vget_low_f32(va0), 1);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc1, vget_low_f32(va0), 1);
const int8x8_t vw01234567c2 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw89ABCDEFc2 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c2 = vmovl_s8(vw01234567c2);
const int16x8_t vxw89ABCDEFc2 = vmovl_s8(vw89ABCDEFc2);
const int32x4_t vxw0123c2 = vmovl_s16(vget_low_s16(vxw01234567c2));
const int32x4_t vxw4567c2 = vmovl_s16(vget_high_s16(vxw01234567c2));
const int32x4_t vxw89ABc2 = vmovl_s16(vget_low_s16(vxw89ABCDEFc2));
const int32x4_t vxwCDEFc2 = vmovl_s16(vget_high_s16(vxw89ABCDEFc2));
const float32x4_t vb0123c2 = vcvtq_f32_s32(vxw0123c2);
const float32x4_t vb4567c2 = vcvtq_f32_s32(vxw4567c2);
const float32x4_t vb89ABc2 = vcvtq_f32_s32(vxw89ABc2);
const float32x4_t vbCDEFc2 = vcvtq_f32_s32(vxwCDEFc2);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc2, vget_high_f32(va0), 0);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc2, vget_high_f32(va0), 0);
const int8x8_t vw01234567c3 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw89ABCDEFc3 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c3 = vmovl_s8(vw01234567c3);
const int16x8_t vxw89ABCDEFc3 = vmovl_s8(vw89ABCDEFc3);
const int32x4_t vxw0123c3 = vmovl_s16(vget_low_s16(vxw01234567c3));
const int32x4_t vxw4567c3 = vmovl_s16(vget_high_s16(vxw01234567c3));
const int32x4_t vxw89ABc3 = vmovl_s16(vget_low_s16(vxw89ABCDEFc3));
const int32x4_t vxwCDEFc3 = vmovl_s16(vget_high_s16(vxw89ABCDEFc3));
const float32x4_t vb0123c3 = vcvtq_f32_s32(vxw0123c3);
const float32x4_t vb4567c3 = vcvtq_f32_s32(vxw4567c3);
const float32x4_t vb89ABc3 = vcvtq_f32_s32(vxw89ABc3);
const float32x4_t vbCDEFc3 = vcvtq_f32_s32(vxwCDEFc3);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc3, vget_high_f32(va0), 1);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc3, vget_high_f32(va0), 1);
}
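    // Remainder loop: handles the last 1-3 k columns one at a time with a broadcast input value.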
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const int8x8_t vw01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567 = vmovl_s8(vw01234567);
const int16x8_t vxw89ABCDEF = vmovl_s8(vw89ABCDEF);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw89AB = vmovl_s16(vget_low_s16(vxw89ABCDEF));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const int32x4_t vxwCDEF = vmovl_s16(vget_high_s16(vxw89ABCDEF));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
const float32x4_t vb89AB = vcvtq_f32_s32(vxw89AB);
const float32x4_t vbCDEF = vcvtq_f32_s32(vxwCDEF);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc0x89AB = vfmaq_f32(vacc0x89AB, va0, vb89AB);
vacc0xCDEF = vfmaq_f32(vacc0xCDEF, va0, vbCDEF);
k -= sizeof(float);
} while (k != 0);
}
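    // Per-channel dequantization scales are packed after the int8 weights; apply them before clamping.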
const float32x4_t vscale0123 = vld1q_f32(w); w = ((const float*) w + 4);
const float32x4_t vscale4567 = vld1q_f32(w); w = ((const float*) w + 4);
const float32x4_t vscale89AB = vld1q_f32(w); w = ((const float*) w + 4);
const float32x4_t vscaleCDEF = vld1q_f32(w); w = ((const float*) w + 4);
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc0x89AB = vmulq_f32(vacc0x89AB, vscale89AB);
vacc0xCDEF = vmulq_f32(vacc0xCDEF, vscaleCDEF);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc0x89AB = vminq_f32(vacc0x89AB, vmax);
vacc0xCDEF = vminq_f32(vacc0xCDEF, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc0x89AB = vmaxq_f32(vacc0x89AB, vmin);
vacc0xCDEF = vmaxq_f32(vacc0xCDEF, vmin);
if XNN_LIKELY(nc >= 16) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
vst1q_f32(c0 + 8, vacc0x89AB);
vst1q_f32(c0 + 12, vacc0xCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
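      // Partial-tile store: write the remaining output columns in halves (8/4/2/1),
      // shifting the higher accumulators down after each store.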
if (nc & 8) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vst1q_f32(c0, vacc0x4567); c0 += 4;
vacc0x0123 = vacc0x89AB;
vacc0x4567 = vacc0xCDEF;
}
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
vacc0x4567 = vacc0x89AB;
vacc0x89AB = vacc0xCDEF;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,021 | 41.961905 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x16-minmax-avx2-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m256 vacc0x01234567 = _mm256_loadu_ps((const float*) w + 0);
__m256 vacc0x89ABCDEF = _mm256_loadu_ps((const float*) w + 8);
w = (const float*) w + 16;
size_t k = kc;
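    // Each iteration broadcasts one input value and widens 16 int8 weights to two 256-bit float
    // vectors with _mm256_cvtepi8_epi32 before the FMA.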
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256i vbi01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const void*) w));
const __m256i vbi89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const void*) ((const int8_t*) w + 8)));
const __m256 vb01234567 = _mm256_cvtepi32_ps(vbi01234567);
const __m256 vb89ABCDEF = _mm256_cvtepi32_ps(vbi89ABCDEF);
w = (const int8_t*) w + 16;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
k -= sizeof(float);
} while (k != 0);
const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w + 0);
vacc0x01234567 = _mm256_mul_ps(vacc0x01234567, vscale01234567);
const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) w + 8);
vacc0x89ABCDEF = _mm256_mul_ps(vacc0x89ABCDEF, vscale89ABCDEF);
w = (const float*) w + 16;
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c0, vacc0x01234567);
vacc0x01234567 = vacc0x89ABCDEF;
c0 += 8;
}
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 3,277 | 27.258621 | 111 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x16-minmax-avx512skx-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x16__avx512skx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m512 vacc0x0123456789ABCDEF = _mm512_loadu_ps(w);
w = (const float*) w + 16;
size_t k = kc;
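    // 16 int8 weights per k column are widened into a single 512-bit float vector; the nc tail is
    // written later with a masked store.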
do {
const __m512i vbi0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_epi8(w));
const __m512 vb0123456789ABCDEF = _mm512_cvtepi32_ps(vbi0123456789ABCDEF);
w = (const int8_t*) w + 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
a0 += 1;
k -= sizeof(float);
} while (k != 0);
const __m512 vscale0123456789ABCDEF = _mm512_loadu_ps((const float*) w + 0);
vacc0x0123456789ABCDEF = _mm512_mul_ps(vacc0x0123456789ABCDEF, vscale0123456789ABCDEF);
w = (const float*) w + 16;
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 2,501 | 27.11236 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x4-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
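    // Packed weights, as consumed here: 4 float biases, then kc groups of 4 int8 weights,
    // then 4 float per-channel scales.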
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
size_t k = kc;
do {
const float va0 = *a0++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc01 *= vscale1;
vacc02 *= vscale2;
vacc03 *= vscale3;
vacc00 = math_max_f32(vacc00, vmin);
vacc01 = math_max_f32(vacc01, vmin);
vacc02 = math_max_f32(vacc02, vmin);
vacc03 = math_max_f32(vacc03, vmin);
vacc00 = math_min_f32(vacc00, vmax);
vacc01 = math_min_f32(vacc01, vmax);
vacc02 = math_min_f32(vacc02, vmax);
vacc03 = math_min_f32(vacc03, vmax);
if XNN_LIKELY(nc >= 4) {
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 2,838 | 24.576577 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x4-minmax-wasm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
size_t k = kc;
do {
const float va0 = *a0++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc01 *= vscale1;
vacc02 *= vscale2;
vacc03 *= vscale3;
vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
if XNN_LIKELY(nc >= 4) {
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 2,916 | 25.279279 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x4-relu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_relu_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
size_t k = kc;
do {
const float va0 = *a0++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc01 *= vscale1;
vacc02 *= vscale2;
vacc03 *= vscale3;
vacc00 = math_max_f32(vacc00, 0.0f);
vacc01 = math_max_f32(vacc01, 0.0f);
vacc02 = math_max_f32(vacc02, 0.0f);
vacc03 = math_max_f32(vacc03, 0.0f);
if XNN_LIKELY(nc >= 4) {
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 2,587 | 23.884615 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x4-relu-wasm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_relu_ukernel_1x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
size_t k = kc;
do {
const float va0 = *a0++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc01 *= vscale1;
vacc02 *= vscale2;
vacc03 *= vscale3;
vacc00 = __builtin_wasm_max_f32(vacc00, 0.0f);
vacc01 = __builtin_wasm_max_f32(vacc01, 0.0f);
vacc02 = __builtin_wasm_max_f32(vacc02, 0.0f);
vacc03 = __builtin_wasm_max_f32(vacc03, 0.0f);
if XNN_LIKELY(nc >= 4) {
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 2,625 | 24.25 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x4-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
size_t k = kc;
do {
const float va0 = *a0++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc01 *= vscale1;
vacc02 *= vscale2;
vacc03 *= vscale3;
if XNN_LIKELY(nc >= 4) {
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 2,421 | 23.22 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x8-minmax-aarch64-neonfma-lane-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x8__aarch64_neonfma_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
const int8x8_t vw01234567c2 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c2 = vmovl_s8(vw01234567c2);
const int32x4_t vxw0123c2 = vmovl_s16(vget_low_s16(vxw01234567c2));
const int32x4_t vxw4567c2 = vmovl_s16(vget_high_s16(vxw01234567c2));
const float32x4_t vb0123c2 = vcvtq_f32_s32(vxw0123c2);
const float32x4_t vb4567c2 = vcvtq_f32_s32(vxw4567c2);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
const int8x8_t vw01234567c3 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c3 = vmovl_s8(vw01234567c3);
const int32x4_t vxw0123c3 = vmovl_s16(vget_low_s16(vxw01234567c3));
const int32x4_t vxw4567c3 = vmovl_s16(vget_high_s16(vxw01234567c3));
const float32x4_t vb0123c3 = vcvtq_f32_s32(vxw0123c3);
const float32x4_t vb4567c3 = vcvtq_f32_s32(vxw4567c3);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
}
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const int8x8_t vw01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567 = vmovl_s8(vw01234567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
k -= sizeof(float);
} while (k != 0);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = ((const float*) w + 4);
const float32x4_t vscale4567 = vld1q_f32(w); w = ((const float*) w + 4);
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,334 | 34.805369 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x8-minmax-aarch64-neonfma-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
size_t k = kc;
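    // Main loop consumes 2 input values per iteration; weights are widened s8 -> f32 and
    // accumulated with vfmaq_lane_f32 against each lane of va0.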
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const int8x8_t vw01230123 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int8x8_t vw45674567 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int16x8_t vxw01230123 = vmovl_s8(vw01230123);
const int16x8_t vxw45674567 = vmovl_s8(vw45674567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01230123));
const int32x4_t vxw4567 = vmovl_s16(vget_low_s16(vxw45674567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,228 | 33.104839 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x8-minmax-avx2-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m256 vacc0x01234567 = _mm256_loadu_ps((const float*) w + 0);
w = (const float*) w + 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256i vbi01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const void*) w));
const __m256 vb01234567 = _mm256_cvtepi32_ps(vbi01234567);
w = (const int8_t*) w + 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
k -= sizeof(float);
} while (k != 0);
const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w + 0);
vacc0x01234567 = _mm256_mul_ps(vacc0x01234567, vscale01234567);
w = (const float*) w + 8;
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 2,515 | 24.16 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x8-minmax-neon-dup-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x8__neon_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
size_t k = kc;
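    // The dup variant broadcasts each input lane with vdupq_lane_f32 and accumulates with
    // vmlaq_f32 instead of lane-indexed FMA.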
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const int8x8_t vw01230123 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int8x8_t vw45674567 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int16x8_t vxw01230123 = vmovl_s8(vw01230123);
const int16x8_t vxw45674567 = vmovl_s8(vw45674567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01230123));
const int32x4_t vxw4567 = vmovl_s16(vget_low_s16(vxw45674567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,304 | 33.166667 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x8-minmax-neon-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const int8x8_t vw01230123 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int8x8_t vw45674567 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int16x8_t vxw01230123 = vmovl_s8(vw01230123);
const int16x8_t vxw45674567 = vmovl_s8(vw45674567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01230123));
const int32x4_t vxw4567 = vmovl_s16(vget_low_s16(vxw45674567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,217 | 33.016129 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x8-minmax-neonfma-dup-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x8__neonfma_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const int8x8_t vw01230123 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int8x8_t vw45674567 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int16x8_t vxw01230123 = vmovl_s8(vw01230123);
const int16x8_t vxw45674567 = vmovl_s8(vw45674567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01230123));
const int32x4_t vxw4567 = vmovl_s16(vget_low_s16(vxw45674567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,307 | 33.190476 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x8-minmax-sse2-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x8__sse2_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
w = (const float*) w + 8;
size_t k = kc;
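    // SSE2 has no pmovsxbd, so int8 weights are sign-extended by duplicating bytes with
    // _mm_unpacklo_epi8/epi16 and arithmetic-shifting each 32-bit lane right by 24.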
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128i vb01234567c0 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567c0 = _mm_unpacklo_epi8(vb01234567c0, vb01234567c0);
const __m128 vb0123c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c0, vbw01234567c0), 24));
const __m128 vb4567c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c0, vbw01234567c0), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128i vb01234567c1 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 8));
const __m128i vbw01234567c1 = _mm_unpacklo_epi8(vb01234567c1, vb01234567c1);
const __m128 vb0123c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c1, vbw01234567c1), 24));
const __m128 vb4567c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c1, vbw01234567c1), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128i vb01234567c2 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 16));
const __m128i vbw01234567c2 = _mm_unpacklo_epi8(vb01234567c2, vb01234567c2);
const __m128 vb0123c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c2, vbw01234567c2), 24));
const __m128 vb4567c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c2, vbw01234567c2), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128i vb01234567c3 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 24));
const __m128i vbw01234567c3 = _mm_unpacklo_epi8(vb01234567c3, vb01234567c3);
const __m128 vb0123c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c3, vbw01234567c3), 24));
const __m128 vb4567c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c3, vbw01234567c3), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128i vb01234567 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567 = _mm_unpacklo_epi8(vb01234567, vb01234567);
const __m128 vb0123 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567, vbw01234567), 24));
const __m128 vb4567 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567, vbw01234567), 24));
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,865 | 36.126582 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x8-minmax-sse2-load1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x8__sse2_load1(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
w = (const float*) w + 8;
size_t k = kc;
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128i vb01234567 = _mm_loadl_epi64((const __m128i *) w);
const __m128i vbw01234567 = _mm_unpacklo_epi8(vb01234567, vb01234567);
const __m128 vb0123 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567, vbw01234567), 24));
const __m128 vb4567 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567, vbw01234567), 24));
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
k -= sizeof(float);
} while (k != 0);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 2,932 | 26.157407 | 110 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x8-minmax-sse41-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x8__sse41_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
w = (const float*) w + 8;
size_t k = kc;
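    // SSE4.1 path: 4-byte unaligned loads are widened directly with _mm_cvtepi8_epi32 (pmovsxbd).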
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128i vbi0123c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 0)));
const __m128i vbi4567c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123c0 = _mm_cvtepi32_ps(vbi0123c0);
const __m128 vb4567c0 = _mm_cvtepi32_ps(vbi4567c0);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128i vbi0123c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 8)));
const __m128i vbi4567c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 12)));
const __m128 vb0123c1 = _mm_cvtepi32_ps(vbi0123c1);
const __m128 vb4567c1 = _mm_cvtepi32_ps(vbi4567c1);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128i vbi0123c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 16)));
const __m128i vbi4567c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 20)));
const __m128 vb0123c2 = _mm_cvtepi32_ps(vbi0123c2);
const __m128 vb4567c2 = _mm_cvtepi32_ps(vbi4567c2);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128i vbi0123c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 24)));
const __m128i vbi4567c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 28)));
const __m128 vb0123c3 = _mm_cvtepi32_ps(vbi0123c3);
const __m128 vb4567c3 = _mm_cvtepi32_ps(vbi4567c3);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128i vbi0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const void*) w)));
const __m128i vbi4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123 = _mm_cvtepi32_ps(vbi0123);
const __m128 vb4567 = _mm_cvtepi32_ps(vbi4567);
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
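/*
 * Note (inferred from the code above, not emitted by the generator): the "dup"
 * variant loads 4 activations per iteration and duplicates each lane across a
 * vector (shuffle_epi32 for lanes 0-2, shuffle_ps for lane 3), so one A load
 * covers 4 k-steps. Each k-step consumes 8 signed int8 weights, widened with
 * _mm_cvtepi8_epi32 and converted to float. The kc % 4 remainder is handled one
 * float at a time with _mm_load1_ps, and the per-channel float scales plus the
 * min/max clamp are applied after the k loop.
 */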
| 5,612 | 34.301887 | 119 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x8-minmax-sse41-load1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x8__sse41_load1(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
w = (const float*) w + 8;
size_t k = kc;
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128i vbi0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_u32(w)));
const __m128i vbi4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_u32((const int8_t*) w + 4)));
const __m128 vb0123 = _mm_cvtepi32_ps(vbi0123);
const __m128 vb4567 = _mm_cvtepi32_ps(vbi4567);
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
k -= sizeof(float);
} while (k != 0);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 2,916 | 25.761468 | 116 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x8s4-minmax-sse2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x8s4__sse2(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
w = (const float*) w + 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128i vb01234567c0 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567c0 = _mm_unpacklo_epi8(vb01234567c0, vb01234567c0);
const __m128 vb0123c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c0, vbw01234567c0), 24));
const __m128 vb4567c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c0, vbw01234567c0), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c1 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 8));
const __m128i vbw01234567c1 = _mm_unpacklo_epi8(vb01234567c1, vb01234567c1);
const __m128 vb0123c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c1, vbw01234567c1), 24));
const __m128 vb4567c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c1, vbw01234567c1), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c2 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 16));
const __m128i vbw01234567c2 = _mm_unpacklo_epi8(vb01234567c2, vb01234567c2);
const __m128 vb0123c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c2, vbw01234567c2), 24));
const __m128 vb4567c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c2, vbw01234567c2), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c3 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 24));
const __m128i vbw01234567c3 = _mm_unpacklo_epi8(vb01234567c3, vb01234567c3);
const __m128 vb0123c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c3, vbw01234567c3), 24));
const __m128 vb4567c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c3, vbw01234567c3), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const __m128i vb01234567c0 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567c0 = _mm_unpacklo_epi8(vb01234567c0, vb01234567c0);
const __m128 vb0123c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c0, vbw01234567c0), 24));
const __m128 vb4567c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c0, vbw01234567c0), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c1 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 8));
const __m128i vbw01234567c1 = _mm_unpacklo_epi8(vb01234567c1, vb01234567c1);
const __m128 vb0123c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c1, vbw01234567c1), 24));
const __m128 vb4567c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c1, vbw01234567c1), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c2 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 16));
const __m128i vbw01234567c2 = _mm_unpacklo_epi8(vb01234567c2, vb01234567c2);
const __m128 vb0123c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c2, vbw01234567c2), 24));
const __m128 vb4567c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c2, vbw01234567c2), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c3 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 24));
const __m128i vbw01234567c3 = _mm_unpacklo_epi8(vb01234567c3, vb01234567c3);
const __m128 vb0123c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c3, vbw01234567c3), 24));
const __m128 vb4567c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c3, vbw01234567c3), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
w = (const int8_t*) w + 32;
}
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
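/*
 * Note (inferred from the code above, not emitted by the generator): the "s4"
 * variant keeps one 4-float slice of A in a register and rotates it between
 * weight groups with _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1)), so a
 * single A load is reused across 4 k-steps. The SSE2 path widens int8 weights by
 * duplicating bytes with _mm_unpacklo_epi8/_mm_unpacklo_epi16 and then
 * arithmetic-shifting the 32-bit lanes right by 24. In the k-remainder block the
 * A lanes whose weights are zero (the zero-padded tail) are masked off with
 * _mm_andnot_ps(_mm_cmpeq_ps(0, vb), va0), presumably so that out-of-range
 * activation lanes cannot contribute NaN/Inf to the accumulators.
 */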
| 7,875 | 43.247191 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-1x8s4-minmax-sse41.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_1x8s4__sse41(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
w = (const float*) w + 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128i vbi0123c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 0)));
const __m128i vbi4567c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123c0 = _mm_cvtepi32_ps(vbi0123c0);
const __m128 vb4567c0 = _mm_cvtepi32_ps(vbi4567c0);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 8)));
const __m128i vbi4567c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 12)));
const __m128 vb0123c1 = _mm_cvtepi32_ps(vbi0123c1);
const __m128 vb4567c1 = _mm_cvtepi32_ps(vbi4567c1);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 16)));
const __m128i vbi4567c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 20)));
const __m128 vb0123c2 = _mm_cvtepi32_ps(vbi0123c2);
const __m128 vb4567c2 = _mm_cvtepi32_ps(vbi4567c2);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 24)));
const __m128i vbi4567c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 28)));
const __m128 vb0123c3 = _mm_cvtepi32_ps(vbi0123c3);
const __m128 vb4567c3 = _mm_cvtepi32_ps(vbi4567c3);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const __m128i vbi0123c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 0)));
const __m128i vbi4567c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123c0 = _mm_cvtepi32_ps(vbi0123c0);
const __m128 vb4567c0 = _mm_cvtepi32_ps(vbi4567c0);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 8)));
const __m128i vbi4567c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 12)));
const __m128 vb0123c1 = _mm_cvtepi32_ps(vbi0123c1);
const __m128 vb4567c1 = _mm_cvtepi32_ps(vbi4567c1);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 16)));
const __m128i vbi4567c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 20)));
const __m128 vb0123c2 = _mm_cvtepi32_ps(vbi0123c2);
const __m128 vb4567c2 = _mm_cvtepi32_ps(vbi4567c2);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 24)));
const __m128i vbi4567c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 28)));
const __m128 vb0123c3 = _mm_cvtepi32_ps(vbi0123c3);
const __m128 vb4567c3 = _mm_cvtepi32_ps(vbi4567c3);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
w = (const int8_t*) w + 32;
}
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,449 | 40.620112 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-2x4-minmax-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc02 *= vscale2;
vacc12 *= vscale2;
vacc03 *= vscale3;
vacc13 *= vscale3;
vacc00 = math_max_f32(vacc00, vmin);
vacc01 = math_max_f32(vacc01, vmin);
vacc02 = math_max_f32(vacc02, vmin);
vacc03 = math_max_f32(vacc03, vmin);
vacc10 = math_max_f32(vacc10, vmin);
vacc11 = math_max_f32(vacc11, vmin);
vacc12 = math_max_f32(vacc12, vmin);
vacc13 = math_max_f32(vacc13, vmin);
vacc00 = math_min_f32(vacc00, vmax);
vacc01 = math_min_f32(vacc01, vmax);
vacc02 = math_min_f32(vacc02, vmax);
vacc03 = math_min_f32(vacc03, vmax);
vacc10 = math_min_f32(vacc10, vmax);
vacc11 = math_min_f32(vacc11, vmax);
vacc12 = math_min_f32(vacc12, vmax);
vacc13 = math_min_f32(vacc13, vmax);
if XNN_LIKELY(nc >= 4) {
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
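/*
 * Note (inferred from the pointer arithmetic above, not emitted by the
 * generator): for every group of 4 output channels the packed stream `w` holds
 *     float bias[4];          // loaded once into the initial accumulators
 *     int8_t weight[kc][4];   // one signed 8-bit weight per channel per k-step
 *     float scale[4];         // per-channel dequantization scale
 * The int8 weights are converted to float on the fly and accumulated with
 * math_muladd_f32, then scaled and clamped to [params->scalar.min, max].
 */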
| 4,078 | 26.375839 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-2x4-minmax-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_2x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc02 *= vscale2;
vacc12 *= vscale2;
vacc03 *= vscale3;
vacc13 *= vscale3;
vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
vacc10 = __builtin_wasm_max_f32(vacc10, vmin);
vacc11 = __builtin_wasm_max_f32(vacc11, vmin);
vacc12 = __builtin_wasm_max_f32(vacc12, vmin);
vacc13 = __builtin_wasm_max_f32(vacc13, vmin);
vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
vacc10 = __builtin_wasm_min_f32(vacc10, vmax);
vacc11 = __builtin_wasm_min_f32(vacc11, vmax);
vacc12 = __builtin_wasm_min_f32(vacc12, vmax);
vacc13 = __builtin_wasm_min_f32(vacc13, vmax);
if XNN_LIKELY(nc >= 4) {
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 4,236 | 27.436242 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-2x4-relu-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_relu_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc02 *= vscale2;
vacc12 *= vscale2;
vacc03 *= vscale3;
vacc13 *= vscale3;
vacc00 = math_max_f32(vacc00, 0.0f);
vacc01 = math_max_f32(vacc01, 0.0f);
vacc02 = math_max_f32(vacc02, 0.0f);
vacc03 = math_max_f32(vacc03, 0.0f);
vacc10 = math_max_f32(vacc10, 0.0f);
vacc11 = math_max_f32(vacc11, 0.0f);
vacc12 = math_max_f32(vacc12, 0.0f);
vacc13 = math_max_f32(vacc13, 0.0f);
if XNN_LIKELY(nc >= 4) {
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 3,663 | 25.550725 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-2x4-relu-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_relu_ukernel_2x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc02 *= vscale2;
vacc12 *= vscale2;
vacc03 *= vscale3;
vacc13 *= vscale3;
vacc00 = __builtin_wasm_max_f32(vacc00, 0.0f);
vacc01 = __builtin_wasm_max_f32(vacc01, 0.0f);
vacc02 = __builtin_wasm_max_f32(vacc02, 0.0f);
vacc03 = __builtin_wasm_max_f32(vacc03, 0.0f);
vacc10 = __builtin_wasm_max_f32(vacc10, 0.0f);
vacc11 = __builtin_wasm_max_f32(vacc11, 0.0f);
vacc12 = __builtin_wasm_max_f32(vacc12, 0.0f);
vacc13 = __builtin_wasm_max_f32(vacc13, 0.0f);
if XNN_LIKELY(nc >= 4) {
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 3,741 | 26.115942 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-2x4-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc02 *= vscale2;
vacc12 *= vscale2;
vacc03 *= vscale3;
vacc13 *= vscale3;
if XNN_LIKELY(nc >= 4) {
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 3,333 | 24.646154 | 76 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-3x16-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_3x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
__m256 vacc0x01234567 = _mm256_loadu_ps((const float*) w + 0);
__m256 vacc0x89ABCDEF = _mm256_loadu_ps((const float*) w + 8);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
w = (const float*) w + 16;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256i vbi01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const void*) w));
const __m256i vbi89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const void*) ((const int8_t*) w + 8)));
const __m256 vb01234567 = _mm256_cvtepi32_ps(vbi01234567);
const __m256 vb89ABCDEF = _mm256_cvtepi32_ps(vbi89ABCDEF);
w = (const int8_t*) w + 16;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
k -= sizeof(float);
} while (k != 0);
const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w + 0);
vacc0x01234567 = _mm256_mul_ps(vacc0x01234567, vscale01234567);
vacc1x01234567 = _mm256_mul_ps(vacc1x01234567, vscale01234567);
vacc2x01234567 = _mm256_mul_ps(vacc2x01234567, vscale01234567);
const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) w + 8);
vacc0x89ABCDEF = _mm256_mul_ps(vacc0x89ABCDEF, vscale89ABCDEF);
vacc1x89ABCDEF = _mm256_mul_ps(vacc1x89ABCDEF, vscale89ABCDEF);
vacc2x89ABCDEF = _mm256_mul_ps(vacc2x89ABCDEF, vscale89ABCDEF);
w = (const float*) w + 16;
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
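/*
 * Note (inferred from the code above, not emitted by the generator): this
 * variant broadcasts one activation per row with _mm256_broadcast_ss and widens
 * 16 int8 weights per k-step through _mm256_cvtepi8_epi32 / _mm256_cvtepi32_ps,
 * accumulating with _mm256_fmadd_ps. When mr < 3 the unused rows alias the row
 * above them (a1 = a0, c1 = c0, ...), so a single code path handles all row
 * counts, and the nc & 8/4/2/1 tail progressively narrows the 256-bit
 * accumulators down to 128-bit halves and scalar stores.
 */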
| 6,258 | 33.39011 | 111 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-3x8-minmax-sse2-dup.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_3x8__sse2_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128i vb01234567c0 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567c0 = _mm_unpacklo_epi8(vb01234567c0, vb01234567c0);
const __m128 vb0123c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c0, vbw01234567c0), 24));
const __m128 vb4567c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c0, vbw01234567c0), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128i vb01234567c1 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 8));
const __m128i vbw01234567c1 = _mm_unpacklo_epi8(vb01234567c1, vb01234567c1);
const __m128 vb0123c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c1, vbw01234567c1), 24));
const __m128 vb4567c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c1, vbw01234567c1), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128i vb01234567c2 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 16));
const __m128i vbw01234567c2 = _mm_unpacklo_epi8(vb01234567c2, vb01234567c2);
const __m128 vb0123c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c2, vbw01234567c2), 24));
const __m128 vb4567c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c2, vbw01234567c2), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128i vb01234567c3 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 24));
const __m128i vbw01234567c3 = _mm_unpacklo_epi8(vb01234567c3, vb01234567c3);
const __m128 vb0123c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c3, vbw01234567c3), 24));
const __m128 vb4567c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c3, vbw01234567c3), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128i vb01234567 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567 = _mm_unpacklo_epi8(vb01234567, vb01234567);
const __m128 vb0123 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567, vbw01234567), 24));
const __m128 vb4567 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567, vbw01234567), 24));
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 10,388 | 41.577869 | 116 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-3x8-minmax-sse2-load1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_3x8__sse2_load1(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128i vb01234567 = _mm_loadl_epi64((const __m128i *) w);
const __m128i vbw01234567 = _mm_unpacklo_epi8(vb01234567, vb01234567);
const __m128 vb0123 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567, vbw01234567), 24));
const __m128 vb4567 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567, vbw01234567), 24));
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
k -= sizeof(float);
} while (k != 0);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,271 | 30.759036 | 110 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-3x8-minmax-sse41-dup.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_3x8__sse41_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128i vbi0123c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 0)));
const __m128i vbi4567c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123c0 = _mm_cvtepi32_ps(vbi0123c0);
const __m128 vb4567c0 = _mm_cvtepi32_ps(vbi4567c0);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128i vbi0123c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 8)));
const __m128i vbi4567c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 12)));
const __m128 vb0123c1 = _mm_cvtepi32_ps(vbi0123c1);
const __m128 vb4567c1 = _mm_cvtepi32_ps(vbi4567c1);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128i vbi0123c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 16)));
const __m128i vbi4567c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 20)));
const __m128 vb0123c2 = _mm_cvtepi32_ps(vbi0123c2);
const __m128 vb4567c2 = _mm_cvtepi32_ps(vbi4567c2);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128i vbi0123c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 24)));
const __m128i vbi4567c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 28)));
const __m128 vb0123c3 = _mm_cvtepi32_ps(vbi0123c3);
const __m128 vb4567c3 = _mm_cvtepi32_ps(vbi4567c3);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
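      // K is not a multiple of 4: process the remaining elements one at a time,
      // broadcasting a single activation value per row.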
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128i vbi0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const void*) w)));
const __m128i vbi4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123 = _mm_cvtepi32_ps(vbi0123);
const __m128 vb4567 = _mm_cvtepi32_ps(vbi4567);
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
k -= sizeof(float);
} while (k != 0);
}
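    // Apply the per-channel dequantization scales (qc8w) that are packed after
    // the int8 weights.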
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 10,135 | 40.371429 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-3x8-minmax-sse41-load1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_3x8__sse41_load1(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
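    // load1 variant: K is processed one element at a time; each activation is
    // broadcast to all four lanes before the multiply-accumulate.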
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128i vbi0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_u32(w)));
const __m128i vbi4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_u32((const int8_t*) w + 4)));
const __m128 vb0123 = _mm_cvtepi32_ps(vbi0123);
const __m128 vb4567 = _mm_cvtepi32_ps(vbi4567);
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
k -= sizeof(float);
} while (k != 0);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,255 | 30.473054 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-3x8s4-minmax-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_3x8s4__sse2(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
__m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
__m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
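      // SSE2 has no pmovsxbd: sign-extend 8 int8 weights by unpacking each byte
      // with itself twice (so it lands in the top byte of a 32-bit lane) and
      // arithmetic-shifting right by 24, then convert to float.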
const __m128i vb01234567c0 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567c0 = _mm_unpacklo_epi8(vb01234567c0, vb01234567c0);
const __m128 vb0123c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c0, vbw01234567c0), 24));
const __m128 vb4567c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c0, vbw01234567c0), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
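      // Rotate the activation vectors left by one lane so the next group of
      // weights pairs with the next K element (the "s4" shuffle scheme).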
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c1 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 8));
const __m128i vbw01234567c1 = _mm_unpacklo_epi8(vb01234567c1, vb01234567c1);
const __m128 vb0123c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c1, vbw01234567c1), 24));
const __m128 vb4567c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c1, vbw01234567c1), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c2 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 16));
const __m128i vbw01234567c2 = _mm_unpacklo_epi8(vb01234567c2, vb01234567c2);
const __m128 vb0123c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c2, vbw01234567c2), 24));
const __m128 vb4567c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c2, vbw01234567c2), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c3 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 24));
const __m128i vbw01234567c3 = _mm_unpacklo_epi8(vb01234567c3, vb01234567c3);
const __m128 vb0123c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c3, vbw01234567c3), 24));
const __m128 vb4567c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c3, vbw01234567c3), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
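      // Remainder (k < 4): the A rows are loaded full-width, which may read past
      // their ends (permitted by XNN_OOB_READS). The packed weights are
      // zero-padded, so lanes whose weight equals zero are masked out of the
      // activations via andnot(cmpeq(0, b), a) before the multiply.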
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
__m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
__m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const __m128i vb01234567c0 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567c0 = _mm_unpacklo_epi8(vb01234567c0, vb01234567c0);
const __m128 vb0123c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c0, vbw01234567c0), 24));
const __m128 vb4567c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c0, vbw01234567c0), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c1 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 8));
const __m128i vbw01234567c1 = _mm_unpacklo_epi8(vb01234567c1, vb01234567c1);
const __m128 vb0123c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c1, vbw01234567c1), 24));
const __m128 vb4567c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c1, vbw01234567c1), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c2 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 16));
const __m128i vbw01234567c2 = _mm_unpacklo_epi8(vb01234567c2, vb01234567c2);
const __m128 vb0123c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c2, vbw01234567c2), 24));
const __m128 vb4567c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c2, vbw01234567c2), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c3 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 24));
const __m128i vbw01234567c3 = _mm_unpacklo_epi8(vb01234567c3, vb01234567c3);
const __m128 vb0123c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c3, vbw01234567c3), 24));
const __m128 vb4567c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c3, vbw01234567c3), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
w = (const int8_t*) w + 32;
}
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 14,008 | 49.032143 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-3x8s4-minmax-sse41.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_3x8s4__sse41(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
__m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
__m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
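      // SSE4.1 variant of the s4 kernel: weights are sign-extended with
      // _mm_cvtepi8_epi32 instead of the SSE2 unpack/shift sequence.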
const __m128i vbi0123c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 0)));
const __m128i vbi4567c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123c0 = _mm_cvtepi32_ps(vbi0123c0);
const __m128 vb4567c0 = _mm_cvtepi32_ps(vbi4567c0);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 8)));
const __m128i vbi4567c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 12)));
const __m128 vb0123c1 = _mm_cvtepi32_ps(vbi0123c1);
const __m128 vb4567c1 = _mm_cvtepi32_ps(vbi4567c1);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 16)));
const __m128i vbi4567c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 20)));
const __m128 vb0123c2 = _mm_cvtepi32_ps(vbi0123c2);
const __m128 vb4567c2 = _mm_cvtepi32_ps(vbi4567c2);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 24)));
const __m128i vbi4567c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 28)));
const __m128 vb0123c3 = _mm_cvtepi32_ps(vbi0123c3);
const __m128 vb4567c3 = _mm_cvtepi32_ps(vbi4567c3);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
__m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
__m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const __m128i vbi0123c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 0)));
const __m128i vbi4567c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123c0 = _mm_cvtepi32_ps(vbi0123c0);
const __m128 vb4567c0 = _mm_cvtepi32_ps(vbi4567c0);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 8)));
const __m128i vbi4567c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 12)));
const __m128 vb0123c1 = _mm_cvtepi32_ps(vbi0123c1);
const __m128 vb4567c1 = _mm_cvtepi32_ps(vbi4567c1);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 16)));
const __m128i vbi4567c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 20)));
const __m128 vb0123c2 = _mm_cvtepi32_ps(vbi0123c2);
const __m128 vb4567c2 = _mm_cvtepi32_ps(vbi4567c2);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 24)));
const __m128i vbi4567c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 28)));
const __m128 vb0123c3 = _mm_cvtepi32_ps(vbi0123c3);
const __m128 vb4567c3 = _mm_cvtepi32_ps(vbi4567c3);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
w = (const int8_t*) w + 32;
}
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 13,582 | 47.338078 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x16-minmax-aarch64-neonfma-lane-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x16__aarch64_neonfma_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x89AB = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0xCDEF = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc1x89AB = vacc0x89AB;
float32x4_t vacc1xCDEF = vacc0xCDEF;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc2x89AB = vacc0x89AB;
float32x4_t vacc2xCDEF = vacc0xCDEF;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc3x89AB = vacc0x89AB;
float32x4_t vacc3xCDEF = vacc0xCDEF;
size_t k = kc;
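    // Main loop: 4 K elements per iteration. Weights are widened
    // int8 -> int16 -> int32 and converted to float, and the accumulation uses
    // lane-indexed FMAs so each A value is taken straight from its 128-bit load.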
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw89ABCDEFc0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw89ABCDEFc0 = vmovl_s8(vw89ABCDEFc0);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw89ABc0 = vmovl_s16(vget_low_s16(vxw89ABCDEFc0));
const int32x4_t vxwCDEFc0 = vmovl_s16(vget_high_s16(vxw89ABCDEFc0));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb89ABc0 = vcvtq_f32_s32(vxw89ABc0);
const float32x4_t vbCDEFc0 = vcvtq_f32_s32(vxwCDEFc0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc0, vget_low_f32(va0), 0);
vacc1x89AB = vfmaq_lane_f32(vacc1x89AB, vb89ABc0, vget_low_f32(va1), 0);
vacc2x89AB = vfmaq_lane_f32(vacc2x89AB, vb89ABc0, vget_low_f32(va2), 0);
vacc3x89AB = vfmaq_lane_f32(vacc3x89AB, vb89ABc0, vget_low_f32(va3), 0);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc0, vget_low_f32(va0), 0);
vacc1xCDEF = vfmaq_lane_f32(vacc1xCDEF, vbCDEFc0, vget_low_f32(va1), 0);
vacc2xCDEF = vfmaq_lane_f32(vacc2xCDEF, vbCDEFc0, vget_low_f32(va2), 0);
vacc3xCDEF = vfmaq_lane_f32(vacc3xCDEF, vbCDEFc0, vget_low_f32(va3), 0);
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw89ABCDEFc1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int16x8_t vxw89ABCDEFc1 = vmovl_s8(vw89ABCDEFc1);
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const int32x4_t vxw89ABc1 = vmovl_s16(vget_low_s16(vxw89ABCDEFc1));
const int32x4_t vxwCDEFc1 = vmovl_s16(vget_high_s16(vxw89ABCDEFc1));
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
const float32x4_t vb89ABc1 = vcvtq_f32_s32(vxw89ABc1);
const float32x4_t vbCDEFc1 = vcvtq_f32_s32(vxwCDEFc1);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc1, vget_low_f32(va0), 1);
vacc1x89AB = vfmaq_lane_f32(vacc1x89AB, vb89ABc1, vget_low_f32(va1), 1);
vacc2x89AB = vfmaq_lane_f32(vacc2x89AB, vb89ABc1, vget_low_f32(va2), 1);
vacc3x89AB = vfmaq_lane_f32(vacc3x89AB, vb89ABc1, vget_low_f32(va3), 1);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc1, vget_low_f32(va0), 1);
vacc1xCDEF = vfmaq_lane_f32(vacc1xCDEF, vbCDEFc1, vget_low_f32(va1), 1);
vacc2xCDEF = vfmaq_lane_f32(vacc2xCDEF, vbCDEFc1, vget_low_f32(va2), 1);
vacc3xCDEF = vfmaq_lane_f32(vacc3xCDEF, vbCDEFc1, vget_low_f32(va3), 1);
const int8x8_t vw01234567c2 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw89ABCDEFc2 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c2 = vmovl_s8(vw01234567c2);
const int16x8_t vxw89ABCDEFc2 = vmovl_s8(vw89ABCDEFc2);
const int32x4_t vxw0123c2 = vmovl_s16(vget_low_s16(vxw01234567c2));
const int32x4_t vxw4567c2 = vmovl_s16(vget_high_s16(vxw01234567c2));
const int32x4_t vxw89ABc2 = vmovl_s16(vget_low_s16(vxw89ABCDEFc2));
const int32x4_t vxwCDEFc2 = vmovl_s16(vget_high_s16(vxw89ABCDEFc2));
const float32x4_t vb0123c2 = vcvtq_f32_s32(vxw0123c2);
const float32x4_t vb4567c2 = vcvtq_f32_s32(vxw4567c2);
const float32x4_t vb89ABc2 = vcvtq_f32_s32(vxw89ABc2);
const float32x4_t vbCDEFc2 = vcvtq_f32_s32(vxwCDEFc2);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc2, vget_high_f32(va0), 0);
vacc1x89AB = vfmaq_lane_f32(vacc1x89AB, vb89ABc2, vget_high_f32(va1), 0);
vacc2x89AB = vfmaq_lane_f32(vacc2x89AB, vb89ABc2, vget_high_f32(va2), 0);
vacc3x89AB = vfmaq_lane_f32(vacc3x89AB, vb89ABc2, vget_high_f32(va3), 0);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc2, vget_high_f32(va0), 0);
vacc1xCDEF = vfmaq_lane_f32(vacc1xCDEF, vbCDEFc2, vget_high_f32(va1), 0);
vacc2xCDEF = vfmaq_lane_f32(vacc2xCDEF, vbCDEFc2, vget_high_f32(va2), 0);
vacc3xCDEF = vfmaq_lane_f32(vacc3xCDEF, vbCDEFc2, vget_high_f32(va3), 0);
const int8x8_t vw01234567c3 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw89ABCDEFc3 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c3 = vmovl_s8(vw01234567c3);
const int16x8_t vxw89ABCDEFc3 = vmovl_s8(vw89ABCDEFc3);
const int32x4_t vxw0123c3 = vmovl_s16(vget_low_s16(vxw01234567c3));
const int32x4_t vxw4567c3 = vmovl_s16(vget_high_s16(vxw01234567c3));
const int32x4_t vxw89ABc3 = vmovl_s16(vget_low_s16(vxw89ABCDEFc3));
const int32x4_t vxwCDEFc3 = vmovl_s16(vget_high_s16(vxw89ABCDEFc3));
const float32x4_t vb0123c3 = vcvtq_f32_s32(vxw0123c3);
const float32x4_t vb4567c3 = vcvtq_f32_s32(vxw4567c3);
const float32x4_t vb89ABc3 = vcvtq_f32_s32(vxw89ABc3);
const float32x4_t vbCDEFc3 = vcvtq_f32_s32(vxwCDEFc3);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc3, vget_high_f32(va0), 1);
vacc1x89AB = vfmaq_lane_f32(vacc1x89AB, vb89ABc3, vget_high_f32(va1), 1);
vacc2x89AB = vfmaq_lane_f32(vacc2x89AB, vb89ABc3, vget_high_f32(va2), 1);
vacc3x89AB = vfmaq_lane_f32(vacc3x89AB, vb89ABc3, vget_high_f32(va3), 1);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc3, vget_high_f32(va0), 1);
vacc1xCDEF = vfmaq_lane_f32(vacc1xCDEF, vbCDEFc3, vget_high_f32(va1), 1);
vacc2xCDEF = vfmaq_lane_f32(vacc2xCDEF, vbCDEFc3, vget_high_f32(va2), 1);
vacc3xCDEF = vfmaq_lane_f32(vacc3xCDEF, vbCDEFc3, vget_high_f32(va3), 1);
}
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const int8x8_t vw01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw89ABCDEF = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567 = vmovl_s8(vw01234567);
const int16x8_t vxw89ABCDEF = vmovl_s8(vw89ABCDEF);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw89AB = vmovl_s16(vget_low_s16(vxw89ABCDEF));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const int32x4_t vxwCDEF = vmovl_s16(vget_high_s16(vxw89ABCDEF));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
const float32x4_t vb89AB = vcvtq_f32_s32(vxw89AB);
const float32x4_t vbCDEF = vcvtq_f32_s32(vxwCDEF);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc0x89AB = vfmaq_f32(vacc0x89AB, va0, vb89AB);
vacc1x89AB = vfmaq_f32(vacc1x89AB, va1, vb89AB);
vacc2x89AB = vfmaq_f32(vacc2x89AB, va2, vb89AB);
vacc3x89AB = vfmaq_f32(vacc3x89AB, va3, vb89AB);
vacc0xCDEF = vfmaq_f32(vacc0xCDEF, va0, vbCDEF);
vacc1xCDEF = vfmaq_f32(vacc1xCDEF, va1, vbCDEF);
vacc2xCDEF = vfmaq_f32(vacc2xCDEF, va2, vbCDEF);
vacc3xCDEF = vfmaq_f32(vacc3xCDEF, va3, vbCDEF);
k -= sizeof(float);
} while (k != 0);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = ((const float*) w + 4);
const float32x4_t vscale4567 = vld1q_f32(w); w = ((const float*) w + 4);
const float32x4_t vscale89AB = vld1q_f32(w); w = ((const float*) w + 4);
const float32x4_t vscaleCDEF = vld1q_f32(w); w = ((const float*) w + 4);
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
vacc0x89AB = vmulq_f32(vacc0x89AB, vscale89AB);
vacc1x89AB = vmulq_f32(vacc1x89AB, vscale89AB);
vacc2x89AB = vmulq_f32(vacc2x89AB, vscale89AB);
vacc3x89AB = vmulq_f32(vacc3x89AB, vscale89AB);
vacc0xCDEF = vmulq_f32(vacc0xCDEF, vscaleCDEF);
vacc1xCDEF = vmulq_f32(vacc1xCDEF, vscaleCDEF);
vacc2xCDEF = vmulq_f32(vacc2xCDEF, vscaleCDEF);
vacc3xCDEF = vmulq_f32(vacc3xCDEF, vscaleCDEF);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc0x89AB = vminq_f32(vacc0x89AB, vmax);
vacc1x89AB = vminq_f32(vacc1x89AB, vmax);
vacc2x89AB = vminq_f32(vacc2x89AB, vmax);
vacc3x89AB = vminq_f32(vacc3x89AB, vmax);
vacc0xCDEF = vminq_f32(vacc0xCDEF, vmax);
vacc1xCDEF = vminq_f32(vacc1xCDEF, vmax);
vacc2xCDEF = vminq_f32(vacc2xCDEF, vmax);
vacc3xCDEF = vminq_f32(vacc3xCDEF, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc0x89AB = vmaxq_f32(vacc0x89AB, vmin);
vacc1x89AB = vmaxq_f32(vacc1x89AB, vmin);
vacc2x89AB = vmaxq_f32(vacc2x89AB, vmin);
vacc3x89AB = vmaxq_f32(vacc3x89AB, vmin);
vacc0xCDEF = vmaxq_f32(vacc0xCDEF, vmin);
vacc1xCDEF = vmaxq_f32(vacc1xCDEF, vmin);
vacc2xCDEF = vmaxq_f32(vacc2xCDEF, vmin);
vacc3xCDEF = vmaxq_f32(vacc3xCDEF, vmin);
if XNN_LIKELY(nc >= 16) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
vst1q_f32(c3 + 8, vacc3x89AB);
vst1q_f32(c3 + 12, vacc3xCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
vst1q_f32(c2 + 8, vacc2x89AB);
vst1q_f32(c2 + 12, vacc2xCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
vst1q_f32(c1 + 8, vacc1x89AB);
vst1q_f32(c1 + 12, vacc1xCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
vst1q_f32(c0 + 8, vacc0x89AB);
vst1q_f32(c0 + 12, vacc0xCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
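      // Fewer than 16 columns remain: store 8, 4, 2, then 1 column at a time,
      // shifting the surviving accumulators down after each partial store.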
if (nc & 8) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vst1q_f32(c3, vacc3x4567); c3 += 4;
vst1q_f32(c2, vacc2x4567); c2 += 4;
vst1q_f32(c1, vacc1x4567); c1 += 4;
vst1q_f32(c0, vacc0x4567); c0 += 4;
vacc3x0123 = vacc3x89AB;
vacc3x4567 = vacc3xCDEF;
vacc2x0123 = vacc2x89AB;
vacc2x4567 = vacc2xCDEF;
vacc1x0123 = vacc1x89AB;
vacc1x4567 = vacc1xCDEF;
vacc0x0123 = vacc0x89AB;
vacc0x4567 = vacc0xCDEF;
}
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc3x4567 = vacc3x89AB;
vacc3x89AB = vacc3xCDEF;
vacc2x0123 = vacc2x4567;
vacc2x4567 = vacc2x89AB;
vacc2x89AB = vacc2xCDEF;
vacc1x0123 = vacc1x4567;
vacc1x4567 = vacc1x89AB;
vacc1x89AB = vacc1xCDEF;
vacc0x0123 = vacc0x4567;
vacc0x4567 = vacc0x89AB;
vacc0x89AB = vacc0xCDEF;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 18,794 | 46.462121 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x16-minmax-avx2-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_loadu_ps((const float*) w + 0);
__m256 vacc0x89ABCDEF = _mm256_loadu_ps((const float*) w + 8);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
w = (const float*) w + 16;
size_t k = kc;
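    // Each K step broadcasts one activation per row and accumulates it against
    // 16 int8 weights, sign-extended and converted to float, using FMA.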
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256i vbi01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const void*) w));
const __m256i vbi89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const void*) ((const int8_t*) w + 8)));
const __m256 vb01234567 = _mm256_cvtepi32_ps(vbi01234567);
const __m256 vb89ABCDEF = _mm256_cvtepi32_ps(vbi89ABCDEF);
w = (const int8_t*) w + 16;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
k -= sizeof(float);
} while (k != 0);
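// Dequantize: 16 per-output-channel scales are stored immediately after the int8 weights.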
const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w + 0);
vacc0x01234567 = _mm256_mul_ps(vacc0x01234567, vscale01234567);
vacc1x01234567 = _mm256_mul_ps(vacc1x01234567, vscale01234567);
vacc2x01234567 = _mm256_mul_ps(vacc2x01234567, vscale01234567);
vacc3x01234567 = _mm256_mul_ps(vacc3x01234567, vscale01234567);
const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) w + 8);
vacc0x89ABCDEF = _mm256_mul_ps(vacc0x89ABCDEF, vscale89ABCDEF);
vacc1x89ABCDEF = _mm256_mul_ps(vacc1x89ABCDEF, vscale89ABCDEF);
vacc2x89ABCDEF = _mm256_mul_ps(vacc2x89ABCDEF, vscale89ABCDEF);
vacc3x89ABCDEF = _mm256_mul_ps(vacc3x89ABCDEF, vscale89ABCDEF);
w = (const float*) w + 16;
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_max_ps(vmin, vacc3x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,749 | 35.046512 | 111 | c |

XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x16-minmax-avx512skx-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x16__avx512skx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m512 vacc0x0123456789ABCDEF = _mm512_loadu_ps(w);
__m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
w = (const float*) w + 16;
size_t k = kc;
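// Each K step converts 16 int8 weights to float and accumulates them against one broadcast activation per row.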
do {
const __m512i vbi0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_epi8(w));
const __m512 vb0123456789ABCDEF = _mm512_cvtepi32_ps(vbi0123456789ABCDEF);
w = (const int8_t*) w + 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
k -= sizeof(float);
} while (k != 0);
const __m512 vscale0123456789ABCDEF = _mm512_loadu_ps((const float*) w + 0);
vacc0x0123456789ABCDEF = _mm512_mul_ps(vacc0x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_mul_ps(vacc1x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_mul_ps(vacc2x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_mul_ps(vacc3x0123456789ABCDEF, vscale0123456789ABCDEF);
w = (const float*) w + 16;
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 5,068 | 35.207143 | 106 | c |

XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x2-minmax-aarch64-neonfma-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2-neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x2__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x2_t vacc0x01 = vreinterpret_f32_u8(vld1_u8(w)); w = (const float*) w + 2;
float32x2_t vacc1x01 = vacc0x01;
float32x2_t vacc2x01 = vacc0x01;
float32x2_t vacc3x01 = vacc0x01;
size_t k = kc;
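// Process K in pairs: 2 activations per row and 4 int8 weights (2 columns x 2 K steps), widened s8 -> s16 -> s32 and converted to float.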
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const uint32x2_t vtmpb = vld1_dup_u32(w); w = (const int8_t*) w + 4;
const int32x4_t vtmpi = vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_u32(vtmpb))));
const float32x4_t vb01c01 = vcvtq_f32_s32(vtmpi);
const float32x2_t vb01c0 = vget_low_f32(vb01c01);
const float32x2_t vb01c1 = vget_high_f32(vb01c01);
#if XNN_ARCH_ARM64
vacc0x01 = vfma_lane_f32(vacc0x01, vb01c0, va0, 0);
vacc1x01 = vfma_lane_f32(vacc1x01, vb01c0, va1, 0);
vacc2x01 = vfma_lane_f32(vacc2x01, vb01c0, va2, 0);
vacc3x01 = vfma_lane_f32(vacc3x01, vb01c0, va3, 0);
#else
const float32x2_t va0c0 = vdup_lane_f32(va0, 0);
const float32x2_t va1c0 = vdup_lane_f32(va1, 0);
const float32x2_t va2c0 = vdup_lane_f32(va2, 0);
const float32x2_t va3c0 = vdup_lane_f32(va3, 0);
vacc0x01 = vfma_f32(vacc0x01, va0c0, vb01c0);
vacc1x01 = vfma_f32(vacc1x01, va1c0, vb01c0);
vacc2x01 = vfma_f32(vacc2x01, va2c0, vb01c0);
vacc3x01 = vfma_f32(vacc3x01, va3c0, vb01c0);
#endif
#if XNN_ARCH_ARM64
vacc0x01 = vfma_lane_f32(vacc0x01, vb01c1, va0, 1);
vacc1x01 = vfma_lane_f32(vacc1x01, vb01c1, va1, 1);
vacc2x01 = vfma_lane_f32(vacc2x01, vb01c1, va2, 1);
vacc3x01 = vfma_lane_f32(vacc3x01, vb01c1, va3, 1);
#else
const float32x2_t va0c1 = vdup_lane_f32(va0, 1);
const float32x2_t va1c1 = vdup_lane_f32(va1, 1);
const float32x2_t va2c1 = vdup_lane_f32(va2, 1);
const float32x2_t va3c1 = vdup_lane_f32(va3, 1);
vacc0x01 = vfma_f32(vacc0x01, va0c1, vb01c1);
vacc1x01 = vfma_f32(vacc1x01, va1c1, vb01c1);
vacc2x01 = vfma_f32(vacc2x01, va2c1, vb01c1);
vacc3x01 = vfma_f32(vacc3x01, va3c1, vb01c1);
#endif
}
if XNN_UNLIKELY(k != 0) {
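// Odd K remainder: broadcast the last activation of each row against the final pair of int8 weights.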
const float32x2_t va0 = vld1_dup_f32(a0); a0 += 1;
const float32x2_t va1 = vld1_dup_f32(a1); a1 += 1;
const float32x2_t va2 = vld1_dup_f32(a2); a2 += 1;
const float32x2_t va3 = vld1_dup_f32(a3); a3 += 1;
const uint16x4_t vtmpb = vld1_dup_u16(w); w = (const int8_t*) w + 2;
const int32x2_t vtmpi = vget_low_s32(vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_u16(vtmpb)))));
const float32x2_t vb01 = vcvt_f32_s32(vtmpi);
vacc0x01 = vfma_f32(vacc0x01, va0, vb01);
vacc1x01 = vfma_f32(vacc1x01, va1, vb01);
vacc2x01 = vfma_f32(vacc2x01, va2, vb01);
vacc3x01 = vfma_f32(vacc3x01, va3, vb01);
}
const float32x2_t vscale = vreinterpret_f32_u8(vld1_u8(w)); w = (const float*) w + 2;
vacc0x01 = vmul_f32(vacc0x01, vscale);
vacc1x01 = vmul_f32(vacc1x01, vscale);
vacc2x01 = vmul_f32(vacc2x01, vscale);
vacc3x01 = vmul_f32(vacc3x01, vscale);
const float32x2_t vmax = vld1_dup_f32(&params->scalar.max);
vacc0x01 = vmin_f32(vacc0x01, vmax);
vacc1x01 = vmin_f32(vacc1x01, vmax);
vacc2x01 = vmin_f32(vacc2x01, vmax);
vacc3x01 = vmin_f32(vacc3x01, vmax);
const float32x2_t vmin = vld1_dup_f32(&params->scalar.min);
vacc0x01 = vmax_f32(vacc0x01, vmin);
vacc1x01 = vmax_f32(vacc1x01, vmin);
vacc2x01 = vmax_f32(vacc2x01, vmin);
vacc3x01 = vmax_f32(vacc3x01, vmin);
if XNN_LIKELY(nc >= 2) {
vst1_f32(c0, vacc0x01);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
vst1_f32(c1, vacc1x01);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1_f32(c2, vacc2x01);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1_f32(c3, vacc3x01);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
nc -= 2;
} else {
assert(nc == 1);
vst1_lane_f32(c0, vacc0x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 5,992 | 34.672619 | 106 | c |

XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x2-minmax-neon-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2-neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x2__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x2_t vacc0x01 = vreinterpret_f32_u8(vld1_u8(w)); w = (const float*) w + 2;
float32x2_t vacc1x01 = vacc0x01;
float32x2_t vacc2x01 = vacc0x01;
float32x2_t vacc3x01 = vacc0x01;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const uint32x2_t vtmpb = vld1_dup_u32(w); w = (const int8_t*) w + 4;
const int32x4_t vtmpi = vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_u32(vtmpb))));
const float32x4_t vb01c01 = vcvtq_f32_s32(vtmpi);
const float32x2_t vb01c0 = vget_low_f32(vb01c01);
const float32x2_t vb01c1 = vget_high_f32(vb01c01);
vacc0x01 = vmla_lane_f32(vacc0x01, vb01c0, va0, 0);
vacc1x01 = vmla_lane_f32(vacc1x01, vb01c0, va1, 0);
vacc2x01 = vmla_lane_f32(vacc2x01, vb01c0, va2, 0);
vacc3x01 = vmla_lane_f32(vacc3x01, vb01c0, va3, 0);
vacc0x01 = vmla_lane_f32(vacc0x01, vb01c1, va0, 1);
vacc1x01 = vmla_lane_f32(vacc1x01, vb01c1, va1, 1);
vacc2x01 = vmla_lane_f32(vacc2x01, vb01c1, va2, 1);
vacc3x01 = vmla_lane_f32(vacc3x01, vb01c1, va3, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x2_t va0 = vld1_dup_f32(a0); a0 += 1;
const float32x2_t va1 = vld1_dup_f32(a1); a1 += 1;
const float32x2_t va2 = vld1_dup_f32(a2); a2 += 1;
const float32x2_t va3 = vld1_dup_f32(a3); a3 += 1;
const uint16x4_t vtmpb = vld1_dup_u16(w); w = (const int8_t*) w + 2;
const int32x2_t vtmpi = vget_low_s32(vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_u16(vtmpb)))));
const float32x2_t vb01 = vcvt_f32_s32(vtmpi);
vacc0x01 = vmla_f32(vacc0x01, va0, vb01);
vacc1x01 = vmla_f32(vacc1x01, va1, vb01);
vacc2x01 = vmla_f32(vacc2x01, va2, vb01);
vacc3x01 = vmla_f32(vacc3x01, va3, vb01);
}
const float32x2_t vscale = vreinterpret_f32_u8(vld1_u8(w)); w = (const float*) w + 2;
vacc0x01 = vmul_f32(vacc0x01, vscale);
vacc1x01 = vmul_f32(vacc1x01, vscale);
vacc2x01 = vmul_f32(vacc2x01, vscale);
vacc3x01 = vmul_f32(vacc3x01, vscale);
const float32x2_t vmax = vld1_dup_f32(&params->scalar.max);
vacc0x01 = vmin_f32(vacc0x01, vmax);
vacc1x01 = vmin_f32(vacc1x01, vmax);
vacc2x01 = vmin_f32(vacc2x01, vmax);
vacc3x01 = vmin_f32(vacc3x01, vmax);
const float32x2_t vmin = vld1_dup_f32(&params->scalar.min);
vacc0x01 = vmax_f32(vacc0x01, vmin);
vacc1x01 = vmax_f32(vacc1x01, vmin);
vacc2x01 = vmax_f32(vacc2x01, vmin);
vacc3x01 = vmax_f32(vacc3x01, vmin);
if XNN_LIKELY(nc >= 2) {
vst1_f32(c0, vacc0x01);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
vst1_f32(c1, vacc1x01);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1_f32(c2, vacc2x01);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1_f32(c3, vacc3x01);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
nc -= 2;
} else {
assert(nc == 1);
vst1_lane_f32(c0, vacc0x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 4,977 | 33.09589 | 106 | c |

XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x2-minmax-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = unaligned_indexed_load_f32(w, 0);
float vacc01 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc30 = vacc00;
float vacc31 = vacc01;
size_t k = kc;
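// Accumulate a 4x2 tile of dot products; int8 weights are converted to float on the fly.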
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
k -= sizeof(float);
} while (k != 0);
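// Apply the two per-column scale factors stored after the int8 weights.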
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc20 *= vscale0;
vacc30 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc21 *= vscale1;
vacc31 *= vscale1;
vacc00 = math_max_f32(vacc00, vmin);
vacc01 = math_max_f32(vacc01, vmin);
vacc10 = math_max_f32(vacc10, vmin);
vacc11 = math_max_f32(vacc11, vmin);
vacc20 = math_max_f32(vacc20, vmin);
vacc21 = math_max_f32(vacc21, vmin);
vacc30 = math_max_f32(vacc30, vmin);
vacc31 = math_max_f32(vacc31, vmin);
vacc00 = math_min_f32(vacc00, vmax);
vacc01 = math_min_f32(vacc01, vmax);
vacc10 = math_min_f32(vacc10, vmax);
vacc11 = math_min_f32(vacc11, vmax);
vacc20 = math_min_f32(vacc20, vmax);
vacc21 = math_min_f32(vacc21, vmax);
vacc30 = math_min_f32(vacc30, vmax);
vacc31 = math_min_f32(vacc31, vmax);
if XNN_LIKELY(nc >= 2) {
c3[0] = vacc30;
c3[1] = vacc31;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 4,380 | 27.083333 | 75 | c |

XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x2-minmax-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x2__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = unaligned_indexed_load_f32(w, 0);
float vacc01 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc30 = vacc00;
float vacc31 = vacc01;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc20 *= vscale0;
vacc30 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc21 *= vscale1;
vacc31 *= vscale1;
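// Clamp the scaled accumulators to [vmin, vmax] using the Wasm min/max builtins.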
vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
vacc10 = __builtin_wasm_max_f32(vacc10, vmin);
vacc11 = __builtin_wasm_max_f32(vacc11, vmin);
vacc20 = __builtin_wasm_max_f32(vacc20, vmin);
vacc21 = __builtin_wasm_max_f32(vacc21, vmin);
vacc30 = __builtin_wasm_max_f32(vacc30, vmin);
vacc31 = __builtin_wasm_max_f32(vacc31, vmin);
vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
vacc10 = __builtin_wasm_min_f32(vacc10, vmax);
vacc11 = __builtin_wasm_min_f32(vacc11, vmax);
vacc20 = __builtin_wasm_min_f32(vacc20, vmax);
vacc21 = __builtin_wasm_min_f32(vacc21, vmax);
vacc30 = __builtin_wasm_min_f32(vacc30, vmax);
vacc31 = __builtin_wasm_min_f32(vacc31, vmax);
if XNN_LIKELY(nc >= 2) {
c3[0] = vacc30;
c3[1] = vacc31;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 4,538 | 28.096154 | 75 | c |

XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x2-relu-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_relu_ukernel_4x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float vacc00 = unaligned_indexed_load_f32(w, 0);
float vacc01 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc30 = vacc00;
float vacc31 = vacc01;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc20 *= vscale0;
vacc30 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc21 *= vscale1;
vacc31 *= vscale1;
vacc00 = math_max_f32(vacc00, 0.0f);
vacc01 = math_max_f32(vacc01, 0.0f);
vacc10 = math_max_f32(vacc10, 0.0f);
vacc11 = math_max_f32(vacc11, 0.0f);
vacc20 = math_max_f32(vacc20, 0.0f);
vacc21 = math_max_f32(vacc21, 0.0f);
vacc30 = math_max_f32(vacc30, 0.0f);
vacc31 = math_max_f32(vacc31, 0.0f);
if XNN_LIKELY(nc >= 2) {
c3[0] = vacc30;
c3[1] = vacc31;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 3,965 | 26.351724 | 73 | c |

XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x2-relu-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_relu_ukernel_4x2__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float vacc00 = unaligned_indexed_load_f32(w, 0);
float vacc01 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc30 = vacc00;
float vacc31 = vacc01;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc20 *= vscale0;
vacc30 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc21 *= vscale1;
vacc31 *= vscale1;
vacc00 = __builtin_wasm_max_f32(vacc00, 0.0f);
vacc01 = __builtin_wasm_max_f32(vacc01, 0.0f);
vacc10 = __builtin_wasm_max_f32(vacc10, 0.0f);
vacc11 = __builtin_wasm_max_f32(vacc11, 0.0f);
vacc20 = __builtin_wasm_max_f32(vacc20, 0.0f);
vacc21 = __builtin_wasm_max_f32(vacc21, 0.0f);
vacc30 = __builtin_wasm_max_f32(vacc30, 0.0f);
vacc31 = __builtin_wasm_max_f32(vacc31, 0.0f);
if XNN_LIKELY(nc >= 2) {
c3[0] = vacc30;
c3[1] = vacc31;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 4,043 | 26.889655 | 73 | c |

XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x2-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_ukernel_4x2__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float vacc00 = unaligned_indexed_load_f32(w, 0);
float vacc01 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc30 = vacc00;
float vacc31 = vacc01;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc20 *= vscale0;
vacc30 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc21 *= vscale1;
vacc31 *= vscale1;
if XNN_LIKELY(nc >= 2) {
c3[0] = vacc30;
c3[1] = vacc31;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 3,635 | 25.540146 | 76 | c |

XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x2c4-minmax-sse2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2c4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x2c4__sse2(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0c4 = _mm_load_ss(w);
__m128 vacc0x1c4 = _mm_load_ss((const float*) w + 1);
__m128 vacc1x0c4 = vacc0x0c4;
__m128 vacc1x1c4 = vacc0x1c4;
__m128 vacc2x0c4 = vacc0x0c4;
__m128 vacc2x1c4 = vacc0x1c4;
__m128 vacc3x0c4 = vacc0x0c4;
__m128 vacc3x1c4 = vacc0x1c4;
w = (const float*) w + 2;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128i vb01 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01 = _mm_unpacklo_epi8(vb01, vb01);
const __m128 vb0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01, vbw01), 24));
const __m128 vb1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01, vbw01), 24));
w = (const int8_t*) w + 8;
vacc0x0c4 = _mm_add_ps(vacc0x0c4, _mm_mul_ps(va0, vb0));
vacc0x1c4 = _mm_add_ps(vacc0x1c4, _mm_mul_ps(va0, vb1));
vacc1x0c4 = _mm_add_ps(vacc1x0c4, _mm_mul_ps(va1, vb0));
vacc1x1c4 = _mm_add_ps(vacc1x1c4, _mm_mul_ps(va1, vb1));
vacc2x0c4 = _mm_add_ps(vacc2x0c4, _mm_mul_ps(va2, vb0));
vacc2x1c4 = _mm_add_ps(vacc2x1c4, _mm_mul_ps(va2, vb1));
vacc3x0c4 = _mm_add_ps(vacc3x0c4, _mm_mul_ps(va3, vb0));
vacc3x1c4 = _mm_add_ps(vacc3x1c4, _mm_mul_ps(va3, vb1));
}
if XNN_UNLIKELY(k != 0) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const __m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
const __m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const __m128 va3 = _mm_loadu_ps(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const __m128i vb01 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01 = _mm_unpacklo_epi8(vb01, vb01);
const __m128 vb0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01, vbw01), 24));
const __m128 vb1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01, vbw01), 24));
w = (const int8_t*) w + 8;
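// The K remainder is zero-padded in the packed weights; zero-weight lanes mask off the matching activation lanes, which may hold out-of-bounds data (the kernel is declared XNN_OOB_READS).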
const __m128 vmask0 = _mm_cmpeq_ps(_mm_setzero_ps(), vb0);
const __m128 vmask1 = _mm_cmpeq_ps(_mm_setzero_ps(), vb1);
vacc0x0c4 = _mm_add_ps(vacc0x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va0), vb0));
vacc0x1c4 = _mm_add_ps(vacc0x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va0), vb1));
vacc1x0c4 = _mm_add_ps(vacc1x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va1), vb0));
vacc1x1c4 = _mm_add_ps(vacc1x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va1), vb1));
vacc2x0c4 = _mm_add_ps(vacc2x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va2), vb0));
vacc2x1c4 = _mm_add_ps(vacc2x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va2), vb1));
vacc3x0c4 = _mm_add_ps(vacc3x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va3), vb0));
vacc3x1c4 = _mm_add_ps(vacc3x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va3), vb1));
}
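// Horizontally reduce the four per-lane partial sums of each c4 accumulator to a single value per output element.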
const __m128 vacc0x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc0x0c4, vacc0x1c4), _mm_unpackhi_ps(vacc0x0c4, vacc0x1c4));
const __m128 vacc1x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc1x0c4, vacc1x1c4), _mm_unpackhi_ps(vacc1x0c4, vacc1x1c4));
const __m128 vacc2x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc2x0c4, vacc2x1c4), _mm_unpackhi_ps(vacc2x0c4, vacc2x1c4));
const __m128 vacc3x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc3x0c4, vacc3x1c4), _mm_unpackhi_ps(vacc3x0c4, vacc3x1c4));
__m128 vacc01x01 = _mm_add_ps(_mm_movelh_ps(vacc0x01c2, vacc1x01c2), _mm_movehl_ps(vacc1x01c2, vacc0x01c2));
__m128 vacc23x01 = _mm_add_ps(_mm_movelh_ps(vacc2x01c2, vacc3x01c2), _mm_movehl_ps(vacc3x01c2, vacc2x01c2));
const __m128 vscalex01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w));
const __m128 vscale2x01 = _mm_movelh_ps(vscalex01, vscalex01);
w = (const float*) w + 2;
vacc01x01 = _mm_mul_ps(vacc01x01, vscale2x01);
vacc23x01 = _mm_mul_ps(vacc23x01, vscale2x01);
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc01x01 = _mm_min_ps(vacc01x01, vmax);
vacc23x01 = _mm_min_ps(vacc23x01, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc01x01 = _mm_max_ps(vacc01x01, vmin);
vacc23x01 = _mm_max_ps(vacc23x01, vmin);
if XNN_LIKELY(nc >= 2) {
_mm_storel_pi((__m64*) c2, vacc23x01);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
_mm_storeh_pi((__m64*) c3, vacc23x01);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
_mm_storel_pi((__m64*) c0, vacc01x01);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
_mm_storeh_pi((__m64*) c1, vacc01x01);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
a1 = (const float*) ((uintptr_t) a1 - kc);
nc -= 2;
} else {
assert(nc == 1);
_mm_store_ss(c2, vacc23x01);
_mm_store_ss(c3, _mm_movehl_ps(vacc23x01, vacc23x01));
_mm_store_ss(c0, vacc01x01);
_mm_store_ss(c1, _mm_movehl_ps(vacc01x01, vacc01x01));
nc = 0;
}
} while (nc != 0);
}
| 6,796 | 38.063218 | 119 | c |

XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x2c4-minmax-sse41.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2c4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x2c4__sse41(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0c4 = _mm_load_ss(w);
__m128 vacc0x1c4 = _mm_load_ss((const float*) w + 1);
__m128 vacc1x0c4 = vacc0x0c4;
__m128 vacc1x1c4 = vacc0x1c4;
__m128 vacc2x0c4 = vacc0x0c4;
__m128 vacc2x1c4 = vacc0x1c4;
__m128 vacc3x0c4 = vacc0x0c4;
__m128 vacc3x1c4 = vacc0x1c4;
w = (const float*) w + 2;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128i vbi0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_u32(w)));
const __m128i vbi1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0 = _mm_cvtepi32_ps(vbi0);
const __m128 vb1 = _mm_cvtepi32_ps(vbi1);
w = (const int8_t*) w + 8;
vacc0x0c4 = _mm_add_ps(vacc0x0c4, _mm_mul_ps(va0, vb0));
vacc0x1c4 = _mm_add_ps(vacc0x1c4, _mm_mul_ps(va0, vb1));
vacc1x0c4 = _mm_add_ps(vacc1x0c4, _mm_mul_ps(va1, vb0));
vacc1x1c4 = _mm_add_ps(vacc1x1c4, _mm_mul_ps(va1, vb1));
vacc2x0c4 = _mm_add_ps(vacc2x0c4, _mm_mul_ps(va2, vb0));
vacc2x1c4 = _mm_add_ps(vacc2x1c4, _mm_mul_ps(va2, vb1));
vacc3x0c4 = _mm_add_ps(vacc3x0c4, _mm_mul_ps(va3, vb0));
vacc3x1c4 = _mm_add_ps(vacc3x1c4, _mm_mul_ps(va3, vb1));
}
if XNN_UNLIKELY(k != 0) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const __m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
const __m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
const __m128 va3 = _mm_loadu_ps(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const __m128i vbi0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_u32(w)));
const __m128i vbi1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0 = _mm_cvtepi32_ps(vbi0);
const __m128 vb1 = _mm_cvtepi32_ps(vbi1);
w = (const int8_t*) w + 8;
const __m128 vmask0 = _mm_cmpeq_ps(_mm_setzero_ps(), vb0);
const __m128 vmask1 = _mm_cmpeq_ps(_mm_setzero_ps(), vb1);
vacc0x0c4 = _mm_add_ps(vacc0x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va0), vb0));
vacc0x1c4 = _mm_add_ps(vacc0x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va0), vb1));
vacc1x0c4 = _mm_add_ps(vacc1x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va1), vb0));
vacc1x1c4 = _mm_add_ps(vacc1x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va1), vb1));
vacc2x0c4 = _mm_add_ps(vacc2x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va2), vb0));
vacc2x1c4 = _mm_add_ps(vacc2x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va2), vb1));
vacc3x0c4 = _mm_add_ps(vacc3x0c4, _mm_mul_ps(_mm_andnot_ps(vmask0, va3), vb0));
vacc3x1c4 = _mm_add_ps(vacc3x1c4, _mm_mul_ps(_mm_andnot_ps(vmask1, va3), vb1));
}
const __m128 vacc0x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc0x0c4, vacc0x1c4), _mm_unpackhi_ps(vacc0x0c4, vacc0x1c4));
const __m128 vacc1x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc1x0c4, vacc1x1c4), _mm_unpackhi_ps(vacc1x0c4, vacc1x1c4));
const __m128 vacc2x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc2x0c4, vacc2x1c4), _mm_unpackhi_ps(vacc2x0c4, vacc2x1c4));
const __m128 vacc3x01c2 = _mm_add_ps(_mm_unpacklo_ps(vacc3x0c4, vacc3x1c4), _mm_unpackhi_ps(vacc3x0c4, vacc3x1c4));
__m128 vacc01x01 = _mm_add_ps(_mm_movelh_ps(vacc0x01c2, vacc1x01c2), _mm_movehl_ps(vacc1x01c2, vacc0x01c2));
__m128 vacc23x01 = _mm_add_ps(_mm_movelh_ps(vacc2x01c2, vacc3x01c2), _mm_movehl_ps(vacc3x01c2, vacc2x01c2));
const __m128 vscalex01 = _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*) w));
const __m128 vscale2x01 = _mm_movelh_ps(vscalex01, vscalex01);
w = (const float*) w + 2;
vacc01x01 = _mm_mul_ps(vacc01x01, vscale2x01);
vacc23x01 = _mm_mul_ps(vacc23x01, vscale2x01);
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc01x01 = _mm_min_ps(vacc01x01, vmax);
vacc23x01 = _mm_min_ps(vacc23x01, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc01x01 = _mm_max_ps(vacc01x01, vmin);
vacc23x01 = _mm_max_ps(vacc23x01, vmin);
if XNN_LIKELY(nc >= 2) {
_mm_storel_pi((__m64*) c2, vacc23x01);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
_mm_storeh_pi((__m64*) c3, vacc23x01);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
_mm_storel_pi((__m64*) c0, vacc01x01);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
_mm_storeh_pi((__m64*) c1, vacc01x01);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
a1 = (const float*) ((uintptr_t) a1 - kc);
nc -= 2;
} else {
assert(nc == 1);
_mm_store_ss(c2, vacc23x01);
_mm_store_ss(c3, _mm_movehl_ps(vacc23x01, vacc23x01));
_mm_store_ss(c0, vacc01x01);
_mm_store_ss(c1, _mm_movehl_ps(vacc01x01, vacc01x01));
nc = 0;
}
} while (nc != 0);
}
| 6,729 | 37.678161 | 119 | c |

XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x4-minmax-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
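// Dequantize with the four per-output-channel scales that follow the int8 weights in the packed buffer.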
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc20 *= vscale0;
vacc30 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc21 *= vscale1;
vacc31 *= vscale1;
vacc02 *= vscale2;
vacc12 *= vscale2;
vacc22 *= vscale2;
vacc32 *= vscale2;
vacc03 *= vscale3;
vacc13 *= vscale3;
vacc23 *= vscale3;
vacc33 *= vscale3;
vacc00 = math_max_f32(vacc00, vmin);
vacc01 = math_max_f32(vacc01, vmin);
vacc02 = math_max_f32(vacc02, vmin);
vacc03 = math_max_f32(vacc03, vmin);
vacc10 = math_max_f32(vacc10, vmin);
vacc11 = math_max_f32(vacc11, vmin);
vacc12 = math_max_f32(vacc12, vmin);
vacc13 = math_max_f32(vacc13, vmin);
vacc20 = math_max_f32(vacc20, vmin);
vacc21 = math_max_f32(vacc21, vmin);
vacc22 = math_max_f32(vacc22, vmin);
vacc23 = math_max_f32(vacc23, vmin);
vacc30 = math_max_f32(vacc30, vmin);
vacc31 = math_max_f32(vacc31, vmin);
vacc32 = math_max_f32(vacc32, vmin);
vacc33 = math_max_f32(vacc33, vmin);
vacc00 = math_min_f32(vacc00, vmax);
vacc01 = math_min_f32(vacc01, vmax);
vacc02 = math_min_f32(vacc02, vmax);
vacc03 = math_min_f32(vacc03, vmax);
vacc10 = math_min_f32(vacc10, vmax);
vacc11 = math_min_f32(vacc11, vmax);
vacc12 = math_min_f32(vacc12, vmax);
vacc13 = math_min_f32(vacc13, vmax);
vacc20 = math_min_f32(vacc20, vmax);
vacc21 = math_min_f32(vacc21, vmax);
vacc22 = math_min_f32(vacc22, vmax);
vacc23 = math_min_f32(vacc23, vmax);
vacc30 = math_min_f32(vacc30, vmax);
vacc31 = math_min_f32(vacc31, vmax);
vacc32 = math_min_f32(vacc32, vmax);
vacc33 = math_min_f32(vacc33, vmax);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 6,557 | 28.146667 | 75 | c |

XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x4-minmax-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
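    // Packed weights: 4 float biases, then kc groups of 4 int8 weights (one per output column), then 4 float per-column dequantization scales.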
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc20 *= vscale0;
vacc30 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc21 *= vscale1;
vacc31 *= vscale1;
vacc02 *= vscale2;
vacc12 *= vscale2;
vacc22 *= vscale2;
vacc32 *= vscale2;
vacc03 *= vscale3;
vacc13 *= vscale3;
vacc23 *= vscale3;
vacc33 *= vscale3;
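    // Clamp the scaled results to [vmin, vmax] using the WebAssembly-native f32.max/f32.min builtins.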
vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
vacc10 = __builtin_wasm_max_f32(vacc10, vmin);
vacc11 = __builtin_wasm_max_f32(vacc11, vmin);
vacc12 = __builtin_wasm_max_f32(vacc12, vmin);
vacc13 = __builtin_wasm_max_f32(vacc13, vmin);
vacc20 = __builtin_wasm_max_f32(vacc20, vmin);
vacc21 = __builtin_wasm_max_f32(vacc21, vmin);
vacc22 = __builtin_wasm_max_f32(vacc22, vmin);
vacc23 = __builtin_wasm_max_f32(vacc23, vmin);
vacc30 = __builtin_wasm_max_f32(vacc30, vmin);
vacc31 = __builtin_wasm_max_f32(vacc31, vmin);
vacc32 = __builtin_wasm_max_f32(vacc32, vmin);
vacc33 = __builtin_wasm_max_f32(vacc33, vmin);
vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
vacc10 = __builtin_wasm_min_f32(vacc10, vmax);
vacc11 = __builtin_wasm_min_f32(vacc11, vmax);
vacc12 = __builtin_wasm_min_f32(vacc12, vmax);
vacc13 = __builtin_wasm_min_f32(vacc13, vmax);
vacc20 = __builtin_wasm_min_f32(vacc20, vmax);
vacc21 = __builtin_wasm_min_f32(vacc21, vmax);
vacc22 = __builtin_wasm_min_f32(vacc22, vmax);
vacc23 = __builtin_wasm_min_f32(vacc23, vmax);
vacc30 = __builtin_wasm_min_f32(vacc30, vmax);
vacc31 = __builtin_wasm_min_f32(vacc31, vmax);
vacc32 = __builtin_wasm_min_f32(vacc32, vmax);
vacc33 = __builtin_wasm_min_f32(vacc33, vmax);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 6,875 | 29.56 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x4-relu-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_relu_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc20 *= vscale0;
vacc30 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc21 *= vscale1;
vacc31 *= vscale1;
vacc02 *= vscale2;
vacc12 *= vscale2;
vacc22 *= vscale2;
vacc32 *= vscale2;
vacc03 *= vscale3;
vacc13 *= vscale3;
vacc23 *= vscale3;
vacc33 *= vscale3;
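    // ReLU activation: clamp negative results to zero.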
vacc00 = math_max_f32(vacc00, 0.0f);
vacc01 = math_max_f32(vacc01, 0.0f);
vacc02 = math_max_f32(vacc02, 0.0f);
vacc03 = math_max_f32(vacc03, 0.0f);
vacc10 = math_max_f32(vacc10, 0.0f);
vacc11 = math_max_f32(vacc11, 0.0f);
vacc12 = math_max_f32(vacc12, 0.0f);
vacc13 = math_max_f32(vacc13, 0.0f);
vacc20 = math_max_f32(vacc20, 0.0f);
vacc21 = math_max_f32(vacc21, 0.0f);
vacc22 = math_max_f32(vacc22, 0.0f);
vacc23 = math_max_f32(vacc23, 0.0f);
vacc30 = math_max_f32(vacc30, 0.0f);
vacc31 = math_max_f32(vacc31, 0.0f);
vacc32 = math_max_f32(vacc32, 0.0f);
vacc33 = math_max_f32(vacc33, 0.0f);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 5,814 | 27.228155 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x4-relu-wasm.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_relu_ukernel_4x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
size_t k = kc;
do {
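      // Each k step multiplies one activation per row by 4 int8 weights (converted to float) and accumulates into the 4x4 tile.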
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc20 *= vscale0;
vacc30 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc21 *= vscale1;
vacc31 *= vscale1;
vacc02 *= vscale2;
vacc12 *= vscale2;
vacc22 *= vscale2;
vacc32 *= vscale2;
vacc03 *= vscale3;
vacc13 *= vscale3;
vacc23 *= vscale3;
vacc33 *= vscale3;
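    // ReLU activation: clamp negative results to zero via the WebAssembly-native f32.max builtin.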
vacc00 = __builtin_wasm_max_f32(vacc00, 0.0f);
vacc01 = __builtin_wasm_max_f32(vacc01, 0.0f);
vacc02 = __builtin_wasm_max_f32(vacc02, 0.0f);
vacc03 = __builtin_wasm_max_f32(vacc03, 0.0f);
vacc10 = __builtin_wasm_max_f32(vacc10, 0.0f);
vacc11 = __builtin_wasm_max_f32(vacc11, 0.0f);
vacc12 = __builtin_wasm_max_f32(vacc12, 0.0f);
vacc13 = __builtin_wasm_max_f32(vacc13, 0.0f);
vacc20 = __builtin_wasm_max_f32(vacc20, 0.0f);
vacc21 = __builtin_wasm_max_f32(vacc21, 0.0f);
vacc22 = __builtin_wasm_max_f32(vacc22, 0.0f);
vacc23 = __builtin_wasm_max_f32(vacc23, 0.0f);
vacc30 = __builtin_wasm_max_f32(vacc30, 0.0f);
vacc31 = __builtin_wasm_max_f32(vacc31, 0.0f);
vacc32 = __builtin_wasm_max_f32(vacc32, 0.0f);
vacc33 = __builtin_wasm_max_f32(vacc33, 0.0f);
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 5,972 | 27.995146 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x4-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_qc8w_gemm_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
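    // Packed weights: 4 float biases, kc groups of 4 int8 weights, then 4 float per-column scales; this variant applies no activation clamp.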
float vacc00 = ((const float*)w)[0];
float vacc01 = ((const float*)w)[1];
float vacc02 = ((const float*)w)[2];
float vacc03 = ((const float*)w)[3];
w = (const float*) w + 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
float vacc20 = vacc00;
float vacc21 = vacc01;
float vacc22 = vacc02;
float vacc23 = vacc03;
float vacc30 = vacc00;
float vacc31 = vacc01;
float vacc32 = vacc02;
float vacc33 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float va2 = *a2++;
const float va3 = *a3++;
const float vb0 = (float) ((const int8_t*) w)[0];
const float vb1 = (float) ((const int8_t*) w)[1];
const float vb2 = (float) ((const int8_t*) w)[2];
const float vb3 = (float) ((const int8_t*) w)[3];
w = (const int8_t*) w + 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
vacc20 = math_muladd_f32(va2, vb0, vacc20);
vacc21 = math_muladd_f32(va2, vb1, vacc21);
vacc22 = math_muladd_f32(va2, vb2, vacc22);
vacc23 = math_muladd_f32(va2, vb3, vacc23);
vacc30 = math_muladd_f32(va3, vb0, vacc30);
vacc31 = math_muladd_f32(va3, vb1, vacc31);
vacc32 = math_muladd_f32(va3, vb2, vacc32);
vacc33 = math_muladd_f32(va3, vb3, vacc33);
k -= sizeof(float);
} while (k != 0);
const float vscale0 = ((const float*)w)[0];
const float vscale1 = ((const float*)w)[1];
const float vscale2 = ((const float*)w)[2];
const float vscale3 = ((const float*)w)[3];
w = (const float*) w + 4;
vacc00 *= vscale0;
vacc10 *= vscale0;
vacc20 *= vscale0;
vacc30 *= vscale0;
vacc01 *= vscale1;
vacc11 *= vscale1;
vacc21 *= vscale1;
vacc31 *= vscale1;
vacc02 *= vscale2;
vacc12 *= vscale2;
vacc22 *= vscale2;
vacc32 *= vscale2;
vacc03 *= vscale3;
vacc13 *= vscale3;
vacc23 *= vscale3;
vacc33 *= vscale3;
if XNN_LIKELY(nc >= 4) {
c3[0] = vacc30;
c3[1] = vacc31;
c3[2] = vacc32;
c3[3] = vacc33;
c3 = (float*) ((uintptr_t) c3 + cn_stride);
c2[0] = vacc20;
c2[1] = vacc21;
c2[2] = vacc22;
c2[3] = vacc23;
c2 = (float*) ((uintptr_t) c2 + cn_stride);
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const void*) ((uintptr_t) a3 - kc);
a2 = (const void*) ((uintptr_t) a2 - kc);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = vacc30;
c3[1] = vacc31;
vacc30 = vacc32;
c3 += 2;
c2[0] = vacc20;
c2[1] = vacc21;
vacc20 = vacc22;
c2 += 2;
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c3[0] = vacc30;
c2[0] = vacc20;
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 5,156 | 26.142105 | 76 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x8-minmax-aarch64-neonfma-lane-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x8__aarch64_neonfma_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
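    // Main loop: 4 k values per iteration; each group of 8 int8 weights is widened s8 -> s16 -> s32 and converted to float before the lane FMAs.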
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
const int8x8_t vw01234567c2 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c2 = vmovl_s8(vw01234567c2);
const int32x4_t vxw0123c2 = vmovl_s16(vget_low_s16(vxw01234567c2));
const int32x4_t vxw4567c2 = vmovl_s16(vget_high_s16(vxw01234567c2));
const float32x4_t vb0123c2 = vcvtq_f32_s32(vxw0123c2);
const float32x4_t vb4567c2 = vcvtq_f32_s32(vxw4567c2);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
const int8x8_t vw01234567c3 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c3 = vmovl_s8(vw01234567c3);
const int32x4_t vxw0123c3 = vmovl_s16(vget_low_s16(vxw01234567c3));
const int32x4_t vxw4567c3 = vmovl_s16(vget_high_s16(vxw01234567c3));
const float32x4_t vb0123c3 = vcvtq_f32_s32(vxw0123c3);
const float32x4_t vb4567c3 = vcvtq_f32_s32(vxw4567c3);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
}
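    // Remainder loop: handle any leftover k values one at a time with broadcast activations.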
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const int8x8_t vw01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567 = vmovl_s8(vw01234567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
k -= sizeof(float);
} while (k != 0);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = ((const float*) w + 4);
const float32x4_t vscale4567 = vld1q_f32(w); w = ((const float*) w + 4);
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,862 | 41.268482 | 79 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x8-minmax-aarch64-neonfma-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
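    // Main loop: 2 k values per iteration; vfmaq_lane_f32 broadcasts one lane of the activation pair into each fused multiply-add.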
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
}
if XNN_UNLIKELY(k != 0) {
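      // At most one k value remains: weights 0-3 and 4-7 are each fetched with a duplicated 4-byte load before widening to float.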
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const int8x8_t vw01230123 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int8x8_t vw45674567 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int16x8_t vxw01230123 = vmovl_s8(vw01230123);
const int16x8_t vxw45674567 = vmovl_s8(vw45674567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01230123));
const int32x4_t vxw4567 = vmovl_s16(vget_low_s16(vxw45674567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,607 | 38.127273 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_loadu_ps((const float*) w + 0);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
w = (const float*) w + 8;
size_t k = kc;
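    // Each k step broadcasts one activation per row and converts 8 int8 weights to float via _mm256_cvtepi8_epi32 and _mm256_cvtepi32_ps.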
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256i vbi01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const void*) w));
const __m256 vb01234567 = _mm256_cvtepi32_ps(vbi01234567);
w = (const int8_t*) w + 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
k -= sizeof(float);
} while (k != 0);
const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w + 0);
vacc0x01234567 = _mm256_mul_ps(vacc0x01234567, vscale01234567);
vacc1x01234567 = _mm256_mul_ps(vacc1x01234567, vscale01234567);
vacc2x01234567 = _mm256_mul_ps(vacc2x01234567, vscale01234567);
vacc3x01234567 = _mm256_mul_ps(vacc3x01234567, vscale01234567);
w = (const float*) w + 8;
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
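      // Partial tile: narrow to 128-bit halves and store 4, 2, or 1 remaining columns.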
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,628 | 31.726744 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x8-minmax-neon-dup-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x8__neon_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
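    // Main loop: 2 k values per iteration; vdupq_lane_f32 broadcasts each activation lane before the vmlaq_f32 multiply-accumulate.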
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const int8x8_t vw01230123 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int8x8_t vw45674567 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int16x8_t vxw01230123 = vmovl_s8(vw01230123);
const int16x8_t vxw45674567 = vmovl_s8(vw45674567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01230123));
const int32x4_t vxw4567 = vmovl_s16(vget_low_s16(vxw45674567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,947 | 38.245614 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x8-minmax-neon-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
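    // Main loop: 2 k values per iteration using vmlaq_lane_f32 multiply-accumulates with the activation pair as the lane operand.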
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const int8x8_t vw01230123 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int8x8_t vw45674567 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int16x8_t vxw01230123 = vmovl_s8(vw01230123);
const int16x8_t vxw45674567 = vmovl_s8(vw45674567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01230123));
const int32x4_t vxw4567 = vmovl_s16(vget_low_s16(vxw45674567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
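    // Store a full 8-column tile per row; partial tiles of 4, 2, or 1 columns are handled in the else branch.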
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,596 | 38.077273 | 98 | c |