repo (string, 1-152 chars, nullable) | file (string, 14-221 chars) | code (string, 501-25k chars) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes) |
---|---|---|---|---|---|---|
XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-neonfp16-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/neonfp16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__neonfp16_x16(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vf0 = vld1q_f32(input); input += 4;
const float32x4_t vf1 = vld1q_f32(input); input += 4;
const float32x4_t vf2 = vld1q_f32(input); input += 4;
const float32x4_t vf3 = vld1q_f32(input); input += 4;
const uint16x8_t vh0 = vreinterpretq_u16_f16(vcombine_f16(vcvt_f16_f32(vf0), vcvt_f16_f32(vf1)));
const uint16x8_t vh1 = vreinterpretq_u16_f16(vcombine_f16(vcvt_f16_f32(vf2), vcvt_f16_f32(vf3)));
vst1q_u16(o, vh0); o += 8;
vst1q_u16(o, vh1); o += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vf = vld1q_f32(input); input += 4;
const uint16x4_t vh = vreinterpret_u16_f16(vcvt_f16_f32(vf));
vst1_u16(o, vh); o += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch % sizeof(float) == 0);
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vf = vld1q_f32(input);
uint16x4_t vh = vreinterpret_u16_f16(vcvt_f16_f32(vf));
if (batch & (2 * sizeof(float))) {
vst1_lane_u32((void*) o, vreinterpret_u32_u16(vh), 0); o += 2;
vh = vext_u16(vh, vh, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_u16(o, vh, 0);
}
}
}
| 2,010 | 29.469697 | 101 | c |
XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-neonfp16-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/neonfp16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__neonfp16_x8(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vf0 = vld1q_f32(input); input += 4;
const float32x4_t vf1 = vld1q_f32(input); input += 4;
const uint16x8_t vh0 = vreinterpretq_u16_f16(vcombine_f16(vcvt_f16_f32(vf0), vcvt_f16_f32(vf1)));
vst1q_u16(o, vh0); o += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vf = vld1q_f32(input); input += 4;
const uint16x4_t vh = vreinterpret_u16_f16(vcvt_f16_f32(vf));
vst1_u16(o, vh); o += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch % sizeof(float) == 0);
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vf = vld1q_f32(input);
uint16x4_t vh = vreinterpret_u16_f16(vcvt_f16_f32(vf));
if (batch & (2 * sizeof(float))) {
vst1_lane_u32((void*) o, vreinterpret_u32_u16(vh), 0); o += 2;
vh = vext_u16(vh, vh, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_u16(o, vh, 0);
}
}
}
| 1,758 | 27.370968 | 101 | c |
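The two NEON kernels above are generated from the same template and differ only in unroll factor (16 vs. 8 floats per main-loop iteration). The hardware `vcvt_f16_f32` instruction performs the actual rounding, and the 1-3-element tail is handled with a full 4-lane load under the `XNN_OOB_READS` contract, i.e. the kernel may read a few bytes past the end of the input. The sketch below is a minimal standalone version of the same idea with a conventional scalar tail instead of the over-reading one; it is an illustration under those assumptions, not XNNPACK code.

```c
#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

// Minimal f32 -> IEEE f16 conversion using the NEON-FP16 unit (sketch, not XNNPACK code).
void f32_to_f16_neonfp16(size_t n, const float* x, uint16_t* y) {
  // Main loop: convert 4 floats per iteration, as in the vector bodies above.
  for (; n >= 4; n -= 4) {
    const float32x4_t vf = vld1q_f32(x); x += 4;
    vst1_u16(y, vreinterpret_u16_f16(vcvt_f16_f32(vf))); y += 4;
  }
  // Conservative scalar tail: one lane at a time, no out-of-bounds reads.
  while (n != 0) {
    const float32x4_t vf = vld1q_dup_f32(x); x += 1;
    vst1_lane_u16(y, vreinterpret_u16_f16(vcvt_f16_f32(vf)), 0); y += 1;
    n -= 1;
  }
}
```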
XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-scalar-bitcast-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/scalar-bitcast.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__scalar_bitcast_x1(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const uint32_t vnonsign_mask = params->scalar_bitcast.nonsign_mask;
const uint32_t vexp_bias = params->scalar_bitcast.exp_bias;
const float vscale_to_inf = params->scalar_bitcast.scale_to_inf;
const uint32_t vexpw_max = params->scalar_bitcast.expw_max;
const float vscale_to_zero = params->scalar_bitcast.scale_to_zero;
const uint32_t vbias_min = params->scalar_bitcast.bias_min;
const uint16_t vexph_mask = params->scalar_bitcast.exph_mask;
const uint16_t vmanth_mask = params->scalar_bitcast.manth_mask;
const uint16_t vnanh = params->scalar_bitcast.nanh;
const uint32_t* i = (const uint32_t*) input;
uint16_t* o = (uint16_t*) output;
do {
const uint32_t vw = *i++;
const uint32_t vnonsignw = vw & vnonsign_mask;
float vf = uint32_as_float(vnonsignw);
const uint32_t vsignw = vw ^ vnonsignw;
uint32_t vbias = vnonsignw + vexp_bias;
vf *= vscale_to_inf;
vbias &= vexpw_max;
vf *= vscale_to_zero;
vbias = math_max_u32(vbias, vbias_min);
vf += uint32_as_float(vbias);
const uint32_t vbits = float_as_uint32(vf);
const uint16_t vexph = (uint16_t) (vbits >> 13) & vexph_mask;
const uint16_t vmanth = (uint16_t) vbits & vmanth_mask;
const uint16_t vsignh = (uint16_t) (vsignw >> 16);
uint16_t vh = vexph + vmanth;
if XNN_UNPREDICTABLE(vnonsignw > vexpw_max) {
vh = vnanh;
}
vh |= vsignh;
*o++ = vh;
batch -= sizeof(float);
} while (batch != 0);
}
| 2,155 | 28.135135 | 76 | c |
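The scalar-bitcast kernel above performs round-to-nearest-even f32-to-f16 conversion with plain integer and float arithmetic, parameterized through `xnn_f32_f16_cvt_params`. For reference, the sketch below is a self-contained version of the same well-known bit-twiddling construction with the constants written inline; those concrete values come from the public construction, not from the params initialization (which is not shown in this table), and `f32_bits`/`bits_f32` are local helpers introduced here.

```c
#include <math.h>
#include <stdint.h>
#include <string.h>

static inline uint32_t f32_bits(float f) { uint32_t w; memcpy(&w, &f, sizeof(w)); return w; }
static inline float bits_f32(uint32_t w) { float f; memcpy(&f, &w, sizeof(f)); return f; }

// Reference f32 -> IEEE f16 (round-to-nearest-even), standalone sketch.
static inline uint16_t f32_to_f16_bits(float f) {
  const float scale_to_inf = 0x1.0p+112f;   // first multiply: values too large for f16 overflow to +inf
  const float scale_to_zero = 0x1.0p-110f;  // second multiply: brings finite values back (net scale 2**2)
  float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

  const uint32_t w = f32_bits(f);
  const uint32_t shl1_w = w + w;                  // bit pattern shifted left by one: drops the sign
  const uint32_t sign = w & UINT32_C(0x80000000);
  uint32_t bias = shl1_w & UINT32_C(0xFF000000);  // exponent field in the shifted position
  if (bias < UINT32_C(0x71000000)) {
    bias = UINT32_C(0x71000000);                  // clamp so subnormal f16 outputs round correctly
  }
  base = bits_f32((bias >> 1) + UINT32_C(0x07800000)) + base;

  const uint32_t bits = f32_bits(base);
  const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
  const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
  const uint32_t nonsign = exp_bits + mantissa_bits;
  // NaN inputs (f32 exponent all ones, nonzero mantissa) map to the canonical f16 NaN 0x7E00.
  return (uint16_t) ((sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT32_C(0x7E00) : nonsign));
}
```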
XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-scalar-bitcast-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/scalar-bitcast.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__scalar_bitcast_x2(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const uint32_t vnonsign_mask = params->scalar_bitcast.nonsign_mask;
const uint32_t vexp_bias = params->scalar_bitcast.exp_bias;
const float vscale_to_inf = params->scalar_bitcast.scale_to_inf;
const uint32_t vexpw_max = params->scalar_bitcast.expw_max;
const float vscale_to_zero = params->scalar_bitcast.scale_to_zero;
const uint32_t vbias_min = params->scalar_bitcast.bias_min;
const uint16_t vexph_mask = params->scalar_bitcast.exph_mask;
const uint16_t vmanth_mask = params->scalar_bitcast.manth_mask;
const uint16_t vnanh = params->scalar_bitcast.nanh;
const uint32_t* i = (const uint32_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const uint32_t vw0 = i[0];
const uint32_t vw1 = i[1];
i += 2;
const uint32_t vnonsignw0 = vw0 & vnonsign_mask;
const uint32_t vnonsignw1 = vw1 & vnonsign_mask;
float vf0 = uint32_as_float(vnonsignw0);
float vf1 = uint32_as_float(vnonsignw1);
const uint32_t vsignw0 = vw0 ^ vnonsignw0;
const uint32_t vsignw1 = vw1 ^ vnonsignw1;
uint32_t vbias0 = vnonsignw0 + vexp_bias;
uint32_t vbias1 = vnonsignw1 + vexp_bias;
vf0 *= vscale_to_inf;
vf1 *= vscale_to_inf;
vbias0 &= vexpw_max;
vbias1 &= vexpw_max;
vf0 *= vscale_to_zero;
vf1 *= vscale_to_zero;
vbias0 = math_max_u32(vbias0, vbias_min);
vbias1 = math_max_u32(vbias1, vbias_min);
vf0 += uint32_as_float(vbias0);
vf1 += uint32_as_float(vbias1);
const uint32_t vbits0 = float_as_uint32(vf0);
const uint32_t vbits1 = float_as_uint32(vf1);
const uint16_t vexph0 = (uint16_t) (vbits0 >> 13) & vexph_mask;
const uint16_t vexph1 = (uint16_t) (vbits1 >> 13) & vexph_mask;
const uint16_t vmanth0 = (uint16_t) vbits0 & vmanth_mask;
const uint16_t vmanth1 = (uint16_t) vbits1 & vmanth_mask;
const uint16_t vsignh0 = (uint16_t) (vsignw0 >> 16);
const uint16_t vsignh1 = (uint16_t) (vsignw1 >> 16);
uint16_t vh0 = vexph0 + vmanth0;
uint16_t vh1 = vexph1 + vmanth1;
if XNN_UNPREDICTABLE(vnonsignw0 > vexpw_max) {
vh0 = vnanh;
}
if XNN_UNPREDICTABLE(vnonsignw1 > vexpw_max) {
vh1 = vnanh;
}
vh0 |= vsignh0;
vh1 |= vsignh1;
o[0] = vh0;
o[1] = vh1;
o += 2;
}
if XNN_UNLIKELY(batch != 0) {
const uint32_t vw = *i;
const uint32_t vnonsignw = vw & vnonsign_mask;
float vf = uint32_as_float(vnonsignw);
const uint32_t vsignw = vw ^ vnonsignw;
uint32_t vbias = vnonsignw + vexp_bias;
vf *= vscale_to_inf;
vbias &= vexpw_max;
vf *= vscale_to_zero;
vbias = math_max_u32(vbias, vbias_min);
vf += uint32_as_float(vbias);
const uint32_t vbits = float_as_uint32(vf);
const uint16_t vexph = (uint16_t) (vbits >> 13) & vexph_mask;
const uint16_t vmanth = (uint16_t) vbits & vmanth_mask;
const uint16_t vsignh = (uint16_t) (vsignw >> 16);
uint16_t vh = vexph + vmanth;
if XNN_UNPREDICTABLE(vnonsignw > vexpw_max) {
vh = vnanh;
}
vh |= vsignh;
*o = vh;
}
}
| 3,767 | 29.144 | 76 | c |
XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-scalar-bitcast-x3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/scalar-bitcast.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__scalar_bitcast_x3(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const uint32_t vnonsign_mask = params->scalar_bitcast.nonsign_mask;
const uint32_t vexp_bias = params->scalar_bitcast.exp_bias;
const float vscale_to_inf = params->scalar_bitcast.scale_to_inf;
const uint32_t vexpw_max = params->scalar_bitcast.expw_max;
const float vscale_to_zero = params->scalar_bitcast.scale_to_zero;
const uint32_t vbias_min = params->scalar_bitcast.bias_min;
const uint16_t vexph_mask = params->scalar_bitcast.exph_mask;
const uint16_t vmanth_mask = params->scalar_bitcast.manth_mask;
const uint16_t vnanh = params->scalar_bitcast.nanh;
const uint32_t* i = (const uint32_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
const uint32_t vw0 = i[0];
const uint32_t vw1 = i[1];
const uint32_t vw2 = i[2];
i += 3;
const uint32_t vnonsignw0 = vw0 & vnonsign_mask;
const uint32_t vnonsignw1 = vw1 & vnonsign_mask;
const uint32_t vnonsignw2 = vw2 & vnonsign_mask;
float vf0 = uint32_as_float(vnonsignw0);
float vf1 = uint32_as_float(vnonsignw1);
float vf2 = uint32_as_float(vnonsignw2);
const uint32_t vsignw0 = vw0 ^ vnonsignw0;
const uint32_t vsignw1 = vw1 ^ vnonsignw1;
const uint32_t vsignw2 = vw2 ^ vnonsignw2;
uint32_t vbias0 = vnonsignw0 + vexp_bias;
uint32_t vbias1 = vnonsignw1 + vexp_bias;
uint32_t vbias2 = vnonsignw2 + vexp_bias;
vf0 *= vscale_to_inf;
vf1 *= vscale_to_inf;
vf2 *= vscale_to_inf;
vbias0 &= vexpw_max;
vbias1 &= vexpw_max;
vbias2 &= vexpw_max;
vf0 *= vscale_to_zero;
vf1 *= vscale_to_zero;
vf2 *= vscale_to_zero;
vbias0 = math_max_u32(vbias0, vbias_min);
vbias1 = math_max_u32(vbias1, vbias_min);
vbias2 = math_max_u32(vbias2, vbias_min);
vf0 += uint32_as_float(vbias0);
vf1 += uint32_as_float(vbias1);
vf2 += uint32_as_float(vbias2);
const uint32_t vbits0 = float_as_uint32(vf0);
const uint32_t vbits1 = float_as_uint32(vf1);
const uint32_t vbits2 = float_as_uint32(vf2);
const uint16_t vexph0 = (uint16_t) (vbits0 >> 13) & vexph_mask;
const uint16_t vexph1 = (uint16_t) (vbits1 >> 13) & vexph_mask;
const uint16_t vexph2 = (uint16_t) (vbits2 >> 13) & vexph_mask;
const uint16_t vmanth0 = (uint16_t) vbits0 & vmanth_mask;
const uint16_t vmanth1 = (uint16_t) vbits1 & vmanth_mask;
const uint16_t vmanth2 = (uint16_t) vbits2 & vmanth_mask;
const uint16_t vsignh0 = (uint16_t) (vsignw0 >> 16);
const uint16_t vsignh1 = (uint16_t) (vsignw1 >> 16);
const uint16_t vsignh2 = (uint16_t) (vsignw2 >> 16);
uint16_t vh0 = vexph0 + vmanth0;
uint16_t vh1 = vexph1 + vmanth1;
uint16_t vh2 = vexph2 + vmanth2;
if XNN_UNPREDICTABLE(vnonsignw0 > vexpw_max) {
vh0 = vnanh;
}
if XNN_UNPREDICTABLE(vnonsignw1 > vexpw_max) {
vh1 = vnanh;
}
if XNN_UNPREDICTABLE(vnonsignw2 > vexpw_max) {
vh2 = vnanh;
}
vh0 |= vsignh0;
vh1 |= vsignh1;
vh2 |= vsignh2;
o[0] = vh0;
o[1] = vh1;
o[2] = vh2;
o += 3;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint32_t vw = *i++;
const uint32_t vnonsignw = vw & vnonsign_mask;
float vf = uint32_as_float(vnonsignw);
const uint32_t vsignw = vw ^ vnonsignw;
uint32_t vbias = vnonsignw + vexp_bias;
vf *= vscale_to_inf;
vbias &= vexpw_max;
vf *= vscale_to_zero;
vbias = math_max_u32(vbias, vbias_min);
vf += uint32_as_float(vbias);
const uint32_t vbits = float_as_uint32(vf);
const uint16_t vexph = (uint16_t) (vbits >> 13) & vexph_mask;
const uint16_t vmanth = (uint16_t) vbits & vmanth_mask;
const uint16_t vsignh = (uint16_t) (vsignw >> 16);
uint16_t vh = vexph + vmanth;
if XNN_UNPREDICTABLE(vnonsignw > vexpw_max) {
vh = vnanh;
}
vh |= vsignh;
*o++ = vh;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 4,645 | 30.181208 | 76 | c |
XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-scalar-bitcast-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/scalar-bitcast.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__scalar_bitcast_x4(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const uint32_t vnonsign_mask = params->scalar_bitcast.nonsign_mask;
const uint32_t vexp_bias = params->scalar_bitcast.exp_bias;
const float vscale_to_inf = params->scalar_bitcast.scale_to_inf;
const uint32_t vexpw_max = params->scalar_bitcast.expw_max;
const float vscale_to_zero = params->scalar_bitcast.scale_to_zero;
const uint32_t vbias_min = params->scalar_bitcast.bias_min;
const uint16_t vexph_mask = params->scalar_bitcast.exph_mask;
const uint16_t vmanth_mask = params->scalar_bitcast.manth_mask;
const uint16_t vnanh = params->scalar_bitcast.nanh;
const uint32_t* i = (const uint32_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const uint32_t vw0 = i[0];
const uint32_t vw1 = i[1];
const uint32_t vw2 = i[2];
const uint32_t vw3 = i[3];
i += 4;
const uint32_t vnonsignw0 = vw0 & vnonsign_mask;
const uint32_t vnonsignw1 = vw1 & vnonsign_mask;
const uint32_t vnonsignw2 = vw2 & vnonsign_mask;
const uint32_t vnonsignw3 = vw3 & vnonsign_mask;
float vf0 = uint32_as_float(vnonsignw0);
float vf1 = uint32_as_float(vnonsignw1);
float vf2 = uint32_as_float(vnonsignw2);
float vf3 = uint32_as_float(vnonsignw3);
const uint32_t vsignw0 = vw0 ^ vnonsignw0;
const uint32_t vsignw1 = vw1 ^ vnonsignw1;
const uint32_t vsignw2 = vw2 ^ vnonsignw2;
const uint32_t vsignw3 = vw3 ^ vnonsignw3;
uint32_t vbias0 = vnonsignw0 + vexp_bias;
uint32_t vbias1 = vnonsignw1 + vexp_bias;
uint32_t vbias2 = vnonsignw2 + vexp_bias;
uint32_t vbias3 = vnonsignw3 + vexp_bias;
vf0 *= vscale_to_inf;
vf1 *= vscale_to_inf;
vf2 *= vscale_to_inf;
vf3 *= vscale_to_inf;
vbias0 &= vexpw_max;
vbias1 &= vexpw_max;
vbias2 &= vexpw_max;
vbias3 &= vexpw_max;
vf0 *= vscale_to_zero;
vf1 *= vscale_to_zero;
vf2 *= vscale_to_zero;
vf3 *= vscale_to_zero;
vbias0 = math_max_u32(vbias0, vbias_min);
vbias1 = math_max_u32(vbias1, vbias_min);
vbias2 = math_max_u32(vbias2, vbias_min);
vbias3 = math_max_u32(vbias3, vbias_min);
vf0 += uint32_as_float(vbias0);
vf1 += uint32_as_float(vbias1);
vf2 += uint32_as_float(vbias2);
vf3 += uint32_as_float(vbias3);
const uint32_t vbits0 = float_as_uint32(vf0);
const uint32_t vbits1 = float_as_uint32(vf1);
const uint32_t vbits2 = float_as_uint32(vf2);
const uint32_t vbits3 = float_as_uint32(vf3);
const uint16_t vexph0 = (uint16_t) (vbits0 >> 13) & vexph_mask;
const uint16_t vexph1 = (uint16_t) (vbits1 >> 13) & vexph_mask;
const uint16_t vexph2 = (uint16_t) (vbits2 >> 13) & vexph_mask;
const uint16_t vexph3 = (uint16_t) (vbits3 >> 13) & vexph_mask;
const uint16_t vmanth0 = (uint16_t) vbits0 & vmanth_mask;
const uint16_t vmanth1 = (uint16_t) vbits1 & vmanth_mask;
const uint16_t vmanth2 = (uint16_t) vbits2 & vmanth_mask;
const uint16_t vmanth3 = (uint16_t) vbits3 & vmanth_mask;
const uint16_t vsignh0 = (uint16_t) (vsignw0 >> 16);
const uint16_t vsignh1 = (uint16_t) (vsignw1 >> 16);
const uint16_t vsignh2 = (uint16_t) (vsignw2 >> 16);
const uint16_t vsignh3 = (uint16_t) (vsignw3 >> 16);
uint16_t vh0 = vexph0 + vmanth0;
uint16_t vh1 = vexph1 + vmanth1;
uint16_t vh2 = vexph2 + vmanth2;
uint16_t vh3 = vexph3 + vmanth3;
if XNN_UNPREDICTABLE(vnonsignw0 > vexpw_max) {
vh0 = vnanh;
}
if XNN_UNPREDICTABLE(vnonsignw1 > vexpw_max) {
vh1 = vnanh;
}
if XNN_UNPREDICTABLE(vnonsignw2 > vexpw_max) {
vh2 = vnanh;
}
if XNN_UNPREDICTABLE(vnonsignw3 > vexpw_max) {
vh3 = vnanh;
}
vh0 |= vsignh0;
vh1 |= vsignh1;
vh2 |= vsignh2;
vh3 |= vsignh3;
o[0] = vh0;
o[1] = vh1;
o[2] = vh2;
o[3] = vh3;
o += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint32_t vw = *i++;
const uint32_t vnonsignw = vw & vnonsign_mask;
float vf = uint32_as_float(vnonsignw);
const uint32_t vsignw = vw ^ vnonsignw;
uint32_t vbias = vnonsignw + vexp_bias;
vf *= vscale_to_inf;
vbias &= vexpw_max;
vf *= vscale_to_zero;
vbias = math_max_u32(vbias, vbias_min);
vf += uint32_as_float(vbias);
const uint32_t vbits = float_as_uint32(vf);
const uint16_t vexph = (uint16_t) (vbits >> 13) & vexph_mask;
const uint16_t vmanth = (uint16_t) vbits & vmanth_mask;
const uint16_t vsignh = (uint16_t) (vsignw >> 16);
uint16_t vh = vexph + vmanth;
if XNN_UNPREDICTABLE(vnonsignw > vexpw_max) {
vh = vnanh;
}
vh |= vsignh;
*o++ = vh;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 5,413 | 31.035503 | 76 | c |
XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-scalar-fabsf-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/scalar-fabsf.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__scalar_fabsf_x1(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale_to_inf = params->scalar_fabsf.scale_to_inf;
const uint32_t vexp_bias = params->scalar_fabsf.exp_bias;
const float vscale_to_zero = params->scalar_fabsf.scale_to_zero;
const uint32_t vexpw_max = params->scalar_fabsf.expw_max;
const uint32_t vbias_min = params->scalar_fabsf.bias_min;
const uint16_t vexph_mask = params->scalar_fabsf.exph_mask;
const uint16_t vmanth_mask = params->scalar_fabsf.manth_mask;
const uint16_t vnanh = params->scalar_fabsf.nanh;
uint16_t* o = (uint16_t*) output;
do {
const float vx = *input++;
const float vabsx = fabsf(vx);
uint32_t vsignw = float_as_uint32(vx);
const uint32_t vnonsignw = float_as_uint32(vabsx);
float vf = vabsx * vscale_to_inf;
uint32_t vbias = vnonsignw + vexp_bias;
vsignw ^= vnonsignw;
vf *= vscale_to_zero;
vbias &= vexpw_max;
vbias = math_max_u32(vbias, vbias_min);
vf += uint32_as_float(vbias);
const uint32_t vbits = float_as_uint32(vf);
const uint16_t vexph = (uint16_t) (vbits >> 13) & vexph_mask;
const uint16_t vmanth = (uint16_t) vbits & vmanth_mask;
const uint16_t vsignh = (uint16_t) (vsignw >> 16);
uint16_t vh = vexph + vmanth;
if XNN_UNPREDICTABLE(vnonsignw > vexpw_max) {
vh = vnanh;
}
vh |= vsignh;
*o++ = vh;
batch -= sizeof(float);
} while (batch != 0);
}
| 2,071 | 26.626667 | 76 | c |
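The fabsf variants obtain |x| with a floating-point `fabsf` instead of masking the bit pattern, and then recover the sign bit by XOR-ing the bit patterns of `x` and `|x|`. A tiny standalone sketch of just that step (an illustration, not XNNPACK code) is:

```c
#include <math.h>
#include <stdint.h>
#include <string.h>

// Isolate the sign bit in its f16 position the way the fabsf kernels do (sketch).
static inline uint16_t f16_sign(float x) {
  uint32_t w, nonsign;
  const float ax = fabsf(x);
  memcpy(&w, &x, sizeof(w));              // float_as_uint32(x)
  memcpy(&nonsign, &ax, sizeof(nonsign)); // float_as_uint32(fabsf(x))
  // w ^ nonsign leaves only the sign bit; shifting by 16 puts it at bit 15 of the f16 word.
  return (uint16_t) ((w ^ nonsign) >> 16);
}
```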
XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-scalar-fabsf-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/scalar-fabsf.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__scalar_fabsf_x2(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale_to_inf = params->scalar_fabsf.scale_to_inf;
const uint32_t vexp_bias = params->scalar_fabsf.exp_bias;
const float vscale_to_zero = params->scalar_fabsf.scale_to_zero;
const uint32_t vexpw_max = params->scalar_fabsf.expw_max;
const uint32_t vbias_min = params->scalar_fabsf.bias_min;
const uint16_t vexph_mask = params->scalar_fabsf.exph_mask;
const uint16_t vmanth_mask = params->scalar_fabsf.manth_mask;
const uint16_t vnanh = params->scalar_fabsf.nanh;
uint16_t* o = (uint16_t*) output;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
const float vabsx0 = fabsf(vx0);
const float vabsx1 = fabsf(vx1);
uint32_t vsignw0 = float_as_uint32(vx0);
uint32_t vsignw1 = float_as_uint32(vx1);
const uint32_t vnonsignw0 = float_as_uint32(vabsx0);
const uint32_t vnonsignw1 = float_as_uint32(vabsx1);
float vf0 = vabsx0 * vscale_to_inf;
float vf1 = vabsx1 * vscale_to_inf;
uint32_t vbias0 = vnonsignw0 + vexp_bias;
uint32_t vbias1 = vnonsignw1 + vexp_bias;
vsignw0 ^= vnonsignw0;
vsignw1 ^= vnonsignw1;
vf0 *= vscale_to_zero;
vf1 *= vscale_to_zero;
vbias0 &= vexpw_max;
vbias1 &= vexpw_max;
vbias0 = math_max_u32(vbias0, vbias_min);
vbias1 = math_max_u32(vbias1, vbias_min);
vf0 += uint32_as_float(vbias0);
vf1 += uint32_as_float(vbias1);
const uint32_t vbits0 = float_as_uint32(vf0);
const uint32_t vbits1 = float_as_uint32(vf1);
const uint16_t vexph0 = (uint16_t) (vbits0 >> 13) & vexph_mask;
const uint16_t vexph1 = (uint16_t) (vbits1 >> 13) & vexph_mask;
const uint16_t vmanth0 = (uint16_t) vbits0 & vmanth_mask;
const uint16_t vmanth1 = (uint16_t) vbits1 & vmanth_mask;
const uint16_t vsignh0 = (uint16_t) (vsignw0 >> 16);
const uint16_t vsignh1 = (uint16_t) (vsignw1 >> 16);
uint16_t vh0 = vexph0 + vmanth0;
uint16_t vh1 = vexph1 + vmanth1;
if XNN_UNPREDICTABLE(vnonsignw0 > vexpw_max) {
vh0 = vnanh;
}
if XNN_UNPREDICTABLE(vnonsignw1 > vexpw_max) {
vh1 = vnanh;
}
vh0 |= vsignh0;
vh1 |= vsignh1;
o[0] = vh0;
o[1] = vh1;
o += 2;
}
if XNN_UNLIKELY(batch != 0) {
const float vx = *input;
const float vabsx = fabsf(vx);
uint32_t vsignw = float_as_uint32(vx);
const uint32_t vnonsignw = float_as_uint32(vabsx);
float vf = vabsx * vscale_to_inf;
uint32_t vbias = vnonsignw + vexp_bias;
vsignw ^= vnonsignw;
vf *= vscale_to_zero;
vbias &= vexpw_max;
vbias = math_max_u32(vbias, vbias_min);
vf += uint32_as_float(vbias);
const uint32_t vbits = float_as_uint32(vf);
const uint16_t vexph = (uint16_t) (vbits >> 13) & vexph_mask;
const uint16_t vmanth = (uint16_t) vbits & vmanth_mask;
const uint16_t vsignh = (uint16_t) (vsignw >> 16);
uint16_t vh = vexph + vmanth;
if XNN_UNPREDICTABLE(vnonsignw > vexpw_max) {
vh = vnanh;
}
vh |= vsignh;
*o = vh;
}
}
| 3,760 | 28.155039 | 76 | c |
XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-scalar-fabsf-x3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/scalar-fabsf.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__scalar_fabsf_x3(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale_to_inf = params->scalar_fabsf.scale_to_inf;
const uint32_t vexp_bias = params->scalar_fabsf.exp_bias;
const float vscale_to_zero = params->scalar_fabsf.scale_to_zero;
const uint32_t vexpw_max = params->scalar_fabsf.expw_max;
const uint32_t vbias_min = params->scalar_fabsf.bias_min;
const uint16_t vexph_mask = params->scalar_fabsf.exph_mask;
const uint16_t vmanth_mask = params->scalar_fabsf.manth_mask;
const uint16_t vnanh = params->scalar_fabsf.nanh;
uint16_t* o = (uint16_t*) output;
for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
input += 3;
const float vabsx0 = fabsf(vx0);
const float vabsx1 = fabsf(vx1);
const float vabsx2 = fabsf(vx2);
uint32_t vsignw0 = float_as_uint32(vx0);
uint32_t vsignw1 = float_as_uint32(vx1);
uint32_t vsignw2 = float_as_uint32(vx2);
const uint32_t vnonsignw0 = float_as_uint32(vabsx0);
const uint32_t vnonsignw1 = float_as_uint32(vabsx1);
const uint32_t vnonsignw2 = float_as_uint32(vabsx2);
float vf0 = vabsx0 * vscale_to_inf;
float vf1 = vabsx1 * vscale_to_inf;
float vf2 = vabsx2 * vscale_to_inf;
uint32_t vbias0 = vnonsignw0 + vexp_bias;
uint32_t vbias1 = vnonsignw1 + vexp_bias;
uint32_t vbias2 = vnonsignw2 + vexp_bias;
vsignw0 ^= vnonsignw0;
vsignw1 ^= vnonsignw1;
vsignw2 ^= vnonsignw2;
vf0 *= vscale_to_zero;
vf1 *= vscale_to_zero;
vf2 *= vscale_to_zero;
vbias0 &= vexpw_max;
vbias1 &= vexpw_max;
vbias2 &= vexpw_max;
vbias0 = math_max_u32(vbias0, vbias_min);
vbias1 = math_max_u32(vbias1, vbias_min);
vbias2 = math_max_u32(vbias2, vbias_min);
vf0 += uint32_as_float(vbias0);
vf1 += uint32_as_float(vbias1);
vf2 += uint32_as_float(vbias2);
const uint32_t vbits0 = float_as_uint32(vf0);
const uint32_t vbits1 = float_as_uint32(vf1);
const uint32_t vbits2 = float_as_uint32(vf2);
const uint16_t vexph0 = (uint16_t) (vbits0 >> 13) & vexph_mask;
const uint16_t vexph1 = (uint16_t) (vbits1 >> 13) & vexph_mask;
const uint16_t vexph2 = (uint16_t) (vbits2 >> 13) & vexph_mask;
const uint16_t vmanth0 = (uint16_t) vbits0 & vmanth_mask;
const uint16_t vmanth1 = (uint16_t) vbits1 & vmanth_mask;
const uint16_t vmanth2 = (uint16_t) vbits2 & vmanth_mask;
const uint16_t vsignh0 = (uint16_t) (vsignw0 >> 16);
const uint16_t vsignh1 = (uint16_t) (vsignw1 >> 16);
const uint16_t vsignh2 = (uint16_t) (vsignw2 >> 16);
uint16_t vh0 = vexph0 + vmanth0;
uint16_t vh1 = vexph1 + vmanth1;
uint16_t vh2 = vexph2 + vmanth2;
if XNN_UNPREDICTABLE(vnonsignw0 > vexpw_max) {
vh0 = vnanh;
}
if XNN_UNPREDICTABLE(vnonsignw1 > vexpw_max) {
vh1 = vnanh;
}
if XNN_UNPREDICTABLE(vnonsignw2 > vexpw_max) {
vh2 = vnanh;
}
vh0 |= vsignh0;
vh1 |= vsignh1;
vh2 |= vsignh2;
o[0] = vh0;
o[1] = vh1;
o[2] = vh2;
o += 3;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
const float vabsx = fabsf(vx);
uint32_t vsignw = float_as_uint32(vx);
const uint32_t vnonsignw = float_as_uint32(vabsx);
float vf = vabsx * vscale_to_inf;
uint32_t vbias = vnonsignw + vexp_bias;
vsignw ^= vnonsignw;
vf *= vscale_to_zero;
vbias &= vexpw_max;
vbias = math_max_u32(vbias, vbias_min);
vf += uint32_as_float(vbias);
const uint32_t vbits = float_as_uint32(vf);
const uint16_t vexph = (uint16_t) (vbits >> 13) & vexph_mask;
const uint16_t vmanth = (uint16_t) vbits & vmanth_mask;
const uint16_t vsignh = (uint16_t) (vsignw >> 16);
uint16_t vh = vexph + vmanth;
if XNN_UNPREDICTABLE(vnonsignw > vexpw_max) {
vh = vnanh;
}
vh |= vsignh;
*o++ = vh;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 4,676 | 29.37013 | 76 | c |
XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-scalar-fabsf-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/scalar-fabsf.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__scalar_fabsf_x4(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale_to_inf = params->scalar_fabsf.scale_to_inf;
const uint32_t vexp_bias = params->scalar_fabsf.exp_bias;
const float vscale_to_zero = params->scalar_fabsf.scale_to_zero;
const uint32_t vexpw_max = params->scalar_fabsf.expw_max;
const uint32_t vbias_min = params->scalar_fabsf.bias_min;
const uint16_t vexph_mask = params->scalar_fabsf.exph_mask;
const uint16_t vmanth_mask = params->scalar_fabsf.manth_mask;
const uint16_t vnanh = params->scalar_fabsf.nanh;
uint16_t* o = (uint16_t*) output;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
const float vabsx0 = fabsf(vx0);
const float vabsx1 = fabsf(vx1);
const float vabsx2 = fabsf(vx2);
const float vabsx3 = fabsf(vx3);
uint32_t vsignw0 = float_as_uint32(vx0);
uint32_t vsignw1 = float_as_uint32(vx1);
uint32_t vsignw2 = float_as_uint32(vx2);
uint32_t vsignw3 = float_as_uint32(vx3);
const uint32_t vnonsignw0 = float_as_uint32(vabsx0);
const uint32_t vnonsignw1 = float_as_uint32(vabsx1);
const uint32_t vnonsignw2 = float_as_uint32(vabsx2);
const uint32_t vnonsignw3 = float_as_uint32(vabsx3);
float vf0 = vabsx0 * vscale_to_inf;
float vf1 = vabsx1 * vscale_to_inf;
float vf2 = vabsx2 * vscale_to_inf;
float vf3 = vabsx3 * vscale_to_inf;
uint32_t vbias0 = vnonsignw0 + vexp_bias;
uint32_t vbias1 = vnonsignw1 + vexp_bias;
uint32_t vbias2 = vnonsignw2 + vexp_bias;
uint32_t vbias3 = vnonsignw3 + vexp_bias;
vsignw0 ^= vnonsignw0;
vsignw1 ^= vnonsignw1;
vsignw2 ^= vnonsignw2;
vsignw3 ^= vnonsignw3;
vf0 *= vscale_to_zero;
vf1 *= vscale_to_zero;
vf2 *= vscale_to_zero;
vf3 *= vscale_to_zero;
vbias0 &= vexpw_max;
vbias1 &= vexpw_max;
vbias2 &= vexpw_max;
vbias3 &= vexpw_max;
vbias0 = math_max_u32(vbias0, vbias_min);
vbias1 = math_max_u32(vbias1, vbias_min);
vbias2 = math_max_u32(vbias2, vbias_min);
vbias3 = math_max_u32(vbias3, vbias_min);
vf0 += uint32_as_float(vbias0);
vf1 += uint32_as_float(vbias1);
vf2 += uint32_as_float(vbias2);
vf3 += uint32_as_float(vbias3);
const uint32_t vbits0 = float_as_uint32(vf0);
const uint32_t vbits1 = float_as_uint32(vf1);
const uint32_t vbits2 = float_as_uint32(vf2);
const uint32_t vbits3 = float_as_uint32(vf3);
const uint16_t vexph0 = (uint16_t) (vbits0 >> 13) & vexph_mask;
const uint16_t vexph1 = (uint16_t) (vbits1 >> 13) & vexph_mask;
const uint16_t vexph2 = (uint16_t) (vbits2 >> 13) & vexph_mask;
const uint16_t vexph3 = (uint16_t) (vbits3 >> 13) & vexph_mask;
const uint16_t vmanth0 = (uint16_t) vbits0 & vmanth_mask;
const uint16_t vmanth1 = (uint16_t) vbits1 & vmanth_mask;
const uint16_t vmanth2 = (uint16_t) vbits2 & vmanth_mask;
const uint16_t vmanth3 = (uint16_t) vbits3 & vmanth_mask;
const uint16_t vsignh0 = (uint16_t) (vsignw0 >> 16);
const uint16_t vsignh1 = (uint16_t) (vsignw1 >> 16);
const uint16_t vsignh2 = (uint16_t) (vsignw2 >> 16);
const uint16_t vsignh3 = (uint16_t) (vsignw3 >> 16);
uint16_t vh0 = vexph0 + vmanth0;
uint16_t vh1 = vexph1 + vmanth1;
uint16_t vh2 = vexph2 + vmanth2;
uint16_t vh3 = vexph3 + vmanth3;
if XNN_UNPREDICTABLE(vnonsignw0 > vexpw_max) {
vh0 = vnanh;
}
if XNN_UNPREDICTABLE(vnonsignw1 > vexpw_max) {
vh1 = vnanh;
}
if XNN_UNPREDICTABLE(vnonsignw2 > vexpw_max) {
vh2 = vnanh;
}
if XNN_UNPREDICTABLE(vnonsignw3 > vexpw_max) {
vh3 = vnanh;
}
vh0 |= vsignh0;
vh1 |= vsignh1;
vh2 |= vsignh2;
vh3 |= vsignh3;
o[0] = vh0;
o[1] = vh1;
o[2] = vh2;
o[3] = vh3;
o += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
const float vabsx = fabsf(vx);
uint32_t vsignw = float_as_uint32(vx);
const uint32_t vnonsignw = float_as_uint32(vabsx);
float vf = vabsx * vscale_to_inf;
uint32_t vbias = vnonsignw + vexp_bias;
vsignw ^= vnonsignw;
vf *= vscale_to_zero;
vbias &= vexpw_max;
vbias = math_max_u32(vbias, vbias_min);
vf += uint32_as_float(vbias);
const uint32_t vbits = float_as_uint32(vf);
const uint16_t vexph = (uint16_t) (vbits >> 13) & vexph_mask;
const uint16_t vmanth = (uint16_t) vbits & vmanth_mask;
const uint16_t vsignh = (uint16_t) (vsignw >> 16);
uint16_t vh = vexph + vmanth;
if XNN_UNPREDICTABLE(vnonsignw > vexpw_max) {
vh = vnanh;
}
vh |= vsignh;
*o++ = vh;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 5,480 | 30.32 | 76 | c |
XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-sse2-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__sse2_x16(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vx0 = _mm_loadu_ps(input);
const __m128 vx1 = _mm_loadu_ps(input + 4);
const __m128 vx2 = _mm_loadu_ps(input + 8);
const __m128 vx3 = _mm_loadu_ps(input + 12);
input += 16;
const __m128 vabsx0 = _mm_and_ps(vx0, vnonsign_mask);
const __m128 vabsx1 = _mm_and_ps(vx1, vnonsign_mask);
const __m128 vabsx2 = _mm_and_ps(vx2, vnonsign_mask);
const __m128 vabsx3 = _mm_and_ps(vx3, vnonsign_mask);
const __m128 vsignx0 = _mm_xor_ps(vx0, vabsx0);
const __m128 vsignx1 = _mm_xor_ps(vx1, vabsx1);
const __m128 vsignx2 = _mm_xor_ps(vx2, vabsx2);
const __m128 vsignx3 = _mm_xor_ps(vx3, vabsx3);
__m128i vbias0 = _mm_add_epi32(_mm_castps_si128(vabsx0), vexp_bias);
__m128i vbias1 = _mm_add_epi32(_mm_castps_si128(vabsx1), vexp_bias);
__m128i vbias2 = _mm_add_epi32(_mm_castps_si128(vabsx2), vexp_bias);
__m128i vbias3 = _mm_add_epi32(_mm_castps_si128(vabsx3), vexp_bias);
__m128 vf0 = _mm_mul_ps(vabsx0, vscale_to_inf);
__m128 vf1 = _mm_mul_ps(vabsx1, vscale_to_inf);
__m128 vf2 = _mm_mul_ps(vabsx2, vscale_to_inf);
__m128 vf3 = _mm_mul_ps(vabsx3, vscale_to_inf);
const __m128i vnanmaskw0 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx0), vexpw_max);
const __m128i vnanmaskw1 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx1), vexpw_max);
const __m128i vnanmaskw2 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx2), vexpw_max);
const __m128i vnanmaskw3 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx3), vexpw_max);
vbias0 = _mm_and_si128(vbias0, vexpw_max);
vbias1 = _mm_and_si128(vbias1, vexpw_max);
vbias2 = _mm_and_si128(vbias2, vexpw_max);
vbias3 = _mm_and_si128(vbias3, vexpw_max);
vf0 = _mm_mul_ps(vf0, vscale_to_zero);
vf1 = _mm_mul_ps(vf1, vscale_to_zero);
vf2 = _mm_mul_ps(vf2, vscale_to_zero);
vf3 = _mm_mul_ps(vf3, vscale_to_zero);
const __m128i vnanmaskh0 = _mm_packs_epi32(vnanmaskw0, vnanmaskw1);
const __m128i vnanmaskh1 = _mm_packs_epi32(vnanmaskw2, vnanmaskw3);
const __m128i vsignh0 = _mm_packs_epi32(_mm_castps_si128(vsignx0), _mm_castps_si128(vsignx1));
const __m128i vsignh1 = _mm_packs_epi32(_mm_castps_si128(vsignx2), _mm_castps_si128(vsignx3));
vbias0 = _mm_max_epi16(vbias0, vbias_min);
vbias1 = _mm_max_epi16(vbias1, vbias_min);
vbias2 = _mm_max_epi16(vbias2, vbias_min);
vbias3 = _mm_max_epi16(vbias3, vbias_min);
__m128i vh0 = _mm_and_si128(vnanh, vnanmaskh0);
__m128i vh1 = _mm_and_si128(vnanh, vnanmaskh1);
vf0 = _mm_add_ps(vf0, _mm_castsi128_ps(vbias0));
vf1 = _mm_add_ps(vf1, _mm_castsi128_ps(vbias1));
vf2 = _mm_add_ps(vf2, _mm_castsi128_ps(vbias2));
vf3 = _mm_add_ps(vf3, _mm_castsi128_ps(vbias3));
vh0 = _mm_or_si128(vh0, vsignh0);
vh1 = _mm_or_si128(vh1, vsignh1);
__m128i vexpw0 = _mm_srli_epi32(_mm_castps_si128(vf0), 13);
__m128i vexpw1 = _mm_srli_epi32(_mm_castps_si128(vf1), 13);
__m128i vexpw2 = _mm_srli_epi32(_mm_castps_si128(vf2), 13);
__m128i vexpw3 = _mm_srli_epi32(_mm_castps_si128(vf3), 13);
const __m128i vmantw0 = _mm_and_si128(_mm_castps_si128(vf0), vmanth_mask);
const __m128i vmantw1 = _mm_and_si128(_mm_castps_si128(vf1), vmanth_mask);
const __m128i vmantw2 = _mm_and_si128(_mm_castps_si128(vf2), vmanth_mask);
const __m128i vmantw3 = _mm_and_si128(_mm_castps_si128(vf3), vmanth_mask);
vexpw0 = _mm_and_si128(vexpw0, vexph_mask);
vexpw1 = _mm_and_si128(vexpw1, vexph_mask);
vexpw2 = _mm_and_si128(vexpw2, vexph_mask);
vexpw3 = _mm_and_si128(vexpw3, vexph_mask);
const __m128i vnonsignw0 = _mm_add_epi32(vmantw0, vexpw0);
const __m128i vnonsignw1 = _mm_add_epi32(vmantw1, vexpw1);
const __m128i vnonsignw2 = _mm_add_epi32(vmantw2, vexpw2);
const __m128i vnonsignw3 = _mm_add_epi32(vmantw3, vexpw3);
const __m128i vnonsignh0 = _mm_packs_epi32(vnonsignw0, vnonsignw1);
const __m128i vnonsignh1 = _mm_packs_epi32(vnonsignw2, vnonsignw3);
vh0 = _mm_or_si128(vh0, _mm_andnot_si128(vnanmaskh0, vnonsignh0));
vh1 = _mm_or_si128(vh1, _mm_andnot_si128(vnanmaskh1, vnonsignh1));
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
o += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx_lo = _mm_loadu_ps(input);
const __m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
__m128i vh = _mm_and_si128(vnanh, vnanmaskh);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
vh = _mm_or_si128(vh, vsignh);
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
vh = _mm_or_si128(vh, _mm_andnot_si128(vnanmaskh, vnonsignh));
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const __m128 vx_lo = _mm_loadu_ps(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const __m128 vx_hi = _mm_loadu_ps(input_hi);
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
__m128i vh = _mm_and_si128(vnanh, vnanmaskh);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
vh = _mm_or_si128(vh, vsignh);
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
vh = _mm_or_si128(vh, _mm_andnot_si128(vnanmaskh, vnonsignh));
if (batch & (4 * sizeof(float))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
*o = (uint16_t) _mm_cvtsi128_si32(vh);
}
}
}
| 10,720 | 42.404858 | 101 | c |
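In the SSE2 kernel above, the NaN patch-up cannot use a per-lane branch, so each result is assembled branch-free as `(vnanmaskh & vnanh) | (~vnanmaskh & vnonsignh)`, with the packed sign bits OR-ed in as well. A minimal standalone helper expressing that and/andnot/or select pattern (an illustration, not part of XNNPACK) is:

```c
#include <emmintrin.h>

// Branch-free per-lane select, as used for the NaN fix-up above:
// returns (mask & a) | (~mask & b) bitwise, so lanes where mask is all-ones take a, others take b.
static inline __m128i select_si128(__m128i mask, __m128i a, __m128i b) {
  return _mm_or_si128(_mm_and_si128(mask, a), _mm_andnot_si128(mask, b));
}
```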
XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-sse2-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__sse2_x24(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);
uint16_t* o = (uint16_t*) output;
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m128 vx0 = _mm_loadu_ps(input);
const __m128 vx1 = _mm_loadu_ps(input + 4);
const __m128 vx2 = _mm_loadu_ps(input + 8);
const __m128 vx3 = _mm_loadu_ps(input + 12);
const __m128 vx4 = _mm_loadu_ps(input + 16);
const __m128 vx5 = _mm_loadu_ps(input + 20);
input += 24;
const __m128 vabsx0 = _mm_and_ps(vx0, vnonsign_mask);
const __m128 vabsx1 = _mm_and_ps(vx1, vnonsign_mask);
const __m128 vabsx2 = _mm_and_ps(vx2, vnonsign_mask);
const __m128 vabsx3 = _mm_and_ps(vx3, vnonsign_mask);
const __m128 vabsx4 = _mm_and_ps(vx4, vnonsign_mask);
const __m128 vabsx5 = _mm_and_ps(vx5, vnonsign_mask);
const __m128 vsignx0 = _mm_xor_ps(vx0, vabsx0);
const __m128 vsignx1 = _mm_xor_ps(vx1, vabsx1);
const __m128 vsignx2 = _mm_xor_ps(vx2, vabsx2);
const __m128 vsignx3 = _mm_xor_ps(vx3, vabsx3);
const __m128 vsignx4 = _mm_xor_ps(vx4, vabsx4);
const __m128 vsignx5 = _mm_xor_ps(vx5, vabsx5);
__m128i vbias0 = _mm_add_epi32(_mm_castps_si128(vabsx0), vexp_bias);
__m128i vbias1 = _mm_add_epi32(_mm_castps_si128(vabsx1), vexp_bias);
__m128i vbias2 = _mm_add_epi32(_mm_castps_si128(vabsx2), vexp_bias);
__m128i vbias3 = _mm_add_epi32(_mm_castps_si128(vabsx3), vexp_bias);
__m128i vbias4 = _mm_add_epi32(_mm_castps_si128(vabsx4), vexp_bias);
__m128i vbias5 = _mm_add_epi32(_mm_castps_si128(vabsx5), vexp_bias);
__m128 vf0 = _mm_mul_ps(vabsx0, vscale_to_inf);
__m128 vf1 = _mm_mul_ps(vabsx1, vscale_to_inf);
__m128 vf2 = _mm_mul_ps(vabsx2, vscale_to_inf);
__m128 vf3 = _mm_mul_ps(vabsx3, vscale_to_inf);
__m128 vf4 = _mm_mul_ps(vabsx4, vscale_to_inf);
__m128 vf5 = _mm_mul_ps(vabsx5, vscale_to_inf);
const __m128i vnanmaskw0 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx0), vexpw_max);
const __m128i vnanmaskw1 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx1), vexpw_max);
const __m128i vnanmaskw2 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx2), vexpw_max);
const __m128i vnanmaskw3 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx3), vexpw_max);
const __m128i vnanmaskw4 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx4), vexpw_max);
const __m128i vnanmaskw5 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx5), vexpw_max);
vbias0 = _mm_and_si128(vbias0, vexpw_max);
vbias1 = _mm_and_si128(vbias1, vexpw_max);
vbias2 = _mm_and_si128(vbias2, vexpw_max);
vbias3 = _mm_and_si128(vbias3, vexpw_max);
vbias4 = _mm_and_si128(vbias4, vexpw_max);
vbias5 = _mm_and_si128(vbias5, vexpw_max);
vf0 = _mm_mul_ps(vf0, vscale_to_zero);
vf1 = _mm_mul_ps(vf1, vscale_to_zero);
vf2 = _mm_mul_ps(vf2, vscale_to_zero);
vf3 = _mm_mul_ps(vf3, vscale_to_zero);
vf4 = _mm_mul_ps(vf4, vscale_to_zero);
vf5 = _mm_mul_ps(vf5, vscale_to_zero);
const __m128i vnanmaskh0 = _mm_packs_epi32(vnanmaskw0, vnanmaskw1);
const __m128i vnanmaskh1 = _mm_packs_epi32(vnanmaskw2, vnanmaskw3);
const __m128i vnanmaskh2 = _mm_packs_epi32(vnanmaskw4, vnanmaskw5);
const __m128i vsignh0 = _mm_packs_epi32(_mm_castps_si128(vsignx0), _mm_castps_si128(vsignx1));
const __m128i vsignh1 = _mm_packs_epi32(_mm_castps_si128(vsignx2), _mm_castps_si128(vsignx3));
const __m128i vsignh2 = _mm_packs_epi32(_mm_castps_si128(vsignx4), _mm_castps_si128(vsignx5));
vbias0 = _mm_max_epi16(vbias0, vbias_min);
vbias1 = _mm_max_epi16(vbias1, vbias_min);
vbias2 = _mm_max_epi16(vbias2, vbias_min);
vbias3 = _mm_max_epi16(vbias3, vbias_min);
vbias4 = _mm_max_epi16(vbias4, vbias_min);
vbias5 = _mm_max_epi16(vbias5, vbias_min);
__m128i vh0 = _mm_and_si128(vnanh, vnanmaskh0);
__m128i vh1 = _mm_and_si128(vnanh, vnanmaskh1);
__m128i vh2 = _mm_and_si128(vnanh, vnanmaskh2);
vf0 = _mm_add_ps(vf0, _mm_castsi128_ps(vbias0));
vf1 = _mm_add_ps(vf1, _mm_castsi128_ps(vbias1));
vf2 = _mm_add_ps(vf2, _mm_castsi128_ps(vbias2));
vf3 = _mm_add_ps(vf3, _mm_castsi128_ps(vbias3));
vf4 = _mm_add_ps(vf4, _mm_castsi128_ps(vbias4));
vf5 = _mm_add_ps(vf5, _mm_castsi128_ps(vbias5));
vh0 = _mm_or_si128(vh0, vsignh0);
vh1 = _mm_or_si128(vh1, vsignh1);
vh2 = _mm_or_si128(vh2, vsignh2);
__m128i vexpw0 = _mm_srli_epi32(_mm_castps_si128(vf0), 13);
__m128i vexpw1 = _mm_srli_epi32(_mm_castps_si128(vf1), 13);
__m128i vexpw2 = _mm_srli_epi32(_mm_castps_si128(vf2), 13);
__m128i vexpw3 = _mm_srli_epi32(_mm_castps_si128(vf3), 13);
__m128i vexpw4 = _mm_srli_epi32(_mm_castps_si128(vf4), 13);
__m128i vexpw5 = _mm_srli_epi32(_mm_castps_si128(vf5), 13);
const __m128i vmantw0 = _mm_and_si128(_mm_castps_si128(vf0), vmanth_mask);
const __m128i vmantw1 = _mm_and_si128(_mm_castps_si128(vf1), vmanth_mask);
const __m128i vmantw2 = _mm_and_si128(_mm_castps_si128(vf2), vmanth_mask);
const __m128i vmantw3 = _mm_and_si128(_mm_castps_si128(vf3), vmanth_mask);
const __m128i vmantw4 = _mm_and_si128(_mm_castps_si128(vf4), vmanth_mask);
const __m128i vmantw5 = _mm_and_si128(_mm_castps_si128(vf5), vmanth_mask);
vexpw0 = _mm_and_si128(vexpw0, vexph_mask);
vexpw1 = _mm_and_si128(vexpw1, vexph_mask);
vexpw2 = _mm_and_si128(vexpw2, vexph_mask);
vexpw3 = _mm_and_si128(vexpw3, vexph_mask);
vexpw4 = _mm_and_si128(vexpw4, vexph_mask);
vexpw5 = _mm_and_si128(vexpw5, vexph_mask);
const __m128i vnonsignw0 = _mm_add_epi32(vmantw0, vexpw0);
const __m128i vnonsignw1 = _mm_add_epi32(vmantw1, vexpw1);
const __m128i vnonsignw2 = _mm_add_epi32(vmantw2, vexpw2);
const __m128i vnonsignw3 = _mm_add_epi32(vmantw3, vexpw3);
const __m128i vnonsignw4 = _mm_add_epi32(vmantw4, vexpw4);
const __m128i vnonsignw5 = _mm_add_epi32(vmantw5, vexpw5);
const __m128i vnonsignh0 = _mm_packs_epi32(vnonsignw0, vnonsignw1);
const __m128i vnonsignh1 = _mm_packs_epi32(vnonsignw2, vnonsignw3);
const __m128i vnonsignh2 = _mm_packs_epi32(vnonsignw4, vnonsignw5);
vh0 = _mm_or_si128(vh0, _mm_andnot_si128(vnanmaskh0, vnonsignh0));
vh1 = _mm_or_si128(vh1, _mm_andnot_si128(vnanmaskh1, vnonsignh1));
vh2 = _mm_or_si128(vh2, _mm_andnot_si128(vnanmaskh2, vnonsignh2));
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
o += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx_lo = _mm_loadu_ps(input);
const __m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
__m128i vh = _mm_and_si128(vnanh, vnanmaskh);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
vh = _mm_or_si128(vh, vsignh);
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
vh = _mm_or_si128(vh, _mm_andnot_si128(vnanmaskh, vnonsignh));
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const __m128 vx_lo = _mm_loadu_ps(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const __m128 vx_hi = _mm_loadu_ps(input_hi);
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
__m128i vh = _mm_and_si128(vnanh, vnanmaskh);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
vh = _mm_or_si128(vh, vsignh);
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
vh = _mm_or_si128(vh, _mm_andnot_si128(vnanmaskh, vnonsignh));
if (batch & (4 * sizeof(float))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
*o = (uint16_t) _mm_cvtsi128_si32(vh);
}
}
}
| 12,798 | 44.386525 | 101 | c |
XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-sse2-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__sse2_x32(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);
uint16_t* o = (uint16_t*) output;
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m128 vx0 = _mm_loadu_ps(input);
const __m128 vx1 = _mm_loadu_ps(input + 4);
const __m128 vx2 = _mm_loadu_ps(input + 8);
const __m128 vx3 = _mm_loadu_ps(input + 12);
const __m128 vx4 = _mm_loadu_ps(input + 16);
const __m128 vx5 = _mm_loadu_ps(input + 20);
const __m128 vx6 = _mm_loadu_ps(input + 24);
const __m128 vx7 = _mm_loadu_ps(input + 28);
input += 32;
const __m128 vabsx0 = _mm_and_ps(vx0, vnonsign_mask);
const __m128 vabsx1 = _mm_and_ps(vx1, vnonsign_mask);
const __m128 vabsx2 = _mm_and_ps(vx2, vnonsign_mask);
const __m128 vabsx3 = _mm_and_ps(vx3, vnonsign_mask);
const __m128 vabsx4 = _mm_and_ps(vx4, vnonsign_mask);
const __m128 vabsx5 = _mm_and_ps(vx5, vnonsign_mask);
const __m128 vabsx6 = _mm_and_ps(vx6, vnonsign_mask);
const __m128 vabsx7 = _mm_and_ps(vx7, vnonsign_mask);
const __m128 vsignx0 = _mm_xor_ps(vx0, vabsx0);
const __m128 vsignx1 = _mm_xor_ps(vx1, vabsx1);
const __m128 vsignx2 = _mm_xor_ps(vx2, vabsx2);
const __m128 vsignx3 = _mm_xor_ps(vx3, vabsx3);
const __m128 vsignx4 = _mm_xor_ps(vx4, vabsx4);
const __m128 vsignx5 = _mm_xor_ps(vx5, vabsx5);
const __m128 vsignx6 = _mm_xor_ps(vx6, vabsx6);
const __m128 vsignx7 = _mm_xor_ps(vx7, vabsx7);
__m128i vbias0 = _mm_add_epi32(_mm_castps_si128(vabsx0), vexp_bias);
__m128i vbias1 = _mm_add_epi32(_mm_castps_si128(vabsx1), vexp_bias);
__m128i vbias2 = _mm_add_epi32(_mm_castps_si128(vabsx2), vexp_bias);
__m128i vbias3 = _mm_add_epi32(_mm_castps_si128(vabsx3), vexp_bias);
__m128i vbias4 = _mm_add_epi32(_mm_castps_si128(vabsx4), vexp_bias);
__m128i vbias5 = _mm_add_epi32(_mm_castps_si128(vabsx5), vexp_bias);
__m128i vbias6 = _mm_add_epi32(_mm_castps_si128(vabsx6), vexp_bias);
__m128i vbias7 = _mm_add_epi32(_mm_castps_si128(vabsx7), vexp_bias);
__m128 vf0 = _mm_mul_ps(vabsx0, vscale_to_inf);
__m128 vf1 = _mm_mul_ps(vabsx1, vscale_to_inf);
__m128 vf2 = _mm_mul_ps(vabsx2, vscale_to_inf);
__m128 vf3 = _mm_mul_ps(vabsx3, vscale_to_inf);
__m128 vf4 = _mm_mul_ps(vabsx4, vscale_to_inf);
__m128 vf5 = _mm_mul_ps(vabsx5, vscale_to_inf);
__m128 vf6 = _mm_mul_ps(vabsx6, vscale_to_inf);
__m128 vf7 = _mm_mul_ps(vabsx7, vscale_to_inf);
const __m128i vnanmaskw0 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx0), vexpw_max);
const __m128i vnanmaskw1 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx1), vexpw_max);
const __m128i vnanmaskw2 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx2), vexpw_max);
const __m128i vnanmaskw3 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx3), vexpw_max);
const __m128i vnanmaskw4 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx4), vexpw_max);
const __m128i vnanmaskw5 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx5), vexpw_max);
const __m128i vnanmaskw6 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx6), vexpw_max);
const __m128i vnanmaskw7 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx7), vexpw_max);
vbias0 = _mm_and_si128(vbias0, vexpw_max);
vbias1 = _mm_and_si128(vbias1, vexpw_max);
vbias2 = _mm_and_si128(vbias2, vexpw_max);
vbias3 = _mm_and_si128(vbias3, vexpw_max);
vbias4 = _mm_and_si128(vbias4, vexpw_max);
vbias5 = _mm_and_si128(vbias5, vexpw_max);
vbias6 = _mm_and_si128(vbias6, vexpw_max);
vbias7 = _mm_and_si128(vbias7, vexpw_max);
vf0 = _mm_mul_ps(vf0, vscale_to_zero);
vf1 = _mm_mul_ps(vf1, vscale_to_zero);
vf2 = _mm_mul_ps(vf2, vscale_to_zero);
vf3 = _mm_mul_ps(vf3, vscale_to_zero);
vf4 = _mm_mul_ps(vf4, vscale_to_zero);
vf5 = _mm_mul_ps(vf5, vscale_to_zero);
vf6 = _mm_mul_ps(vf6, vscale_to_zero);
vf7 = _mm_mul_ps(vf7, vscale_to_zero);
const __m128i vnanmaskh0 = _mm_packs_epi32(vnanmaskw0, vnanmaskw1);
const __m128i vnanmaskh1 = _mm_packs_epi32(vnanmaskw2, vnanmaskw3);
const __m128i vnanmaskh2 = _mm_packs_epi32(vnanmaskw4, vnanmaskw5);
const __m128i vnanmaskh3 = _mm_packs_epi32(vnanmaskw6, vnanmaskw7);
const __m128i vsignh0 = _mm_packs_epi32(_mm_castps_si128(vsignx0), _mm_castps_si128(vsignx1));
const __m128i vsignh1 = _mm_packs_epi32(_mm_castps_si128(vsignx2), _mm_castps_si128(vsignx3));
const __m128i vsignh2 = _mm_packs_epi32(_mm_castps_si128(vsignx4), _mm_castps_si128(vsignx5));
const __m128i vsignh3 = _mm_packs_epi32(_mm_castps_si128(vsignx6), _mm_castps_si128(vsignx7));
vbias0 = _mm_max_epi16(vbias0, vbias_min);
vbias1 = _mm_max_epi16(vbias1, vbias_min);
vbias2 = _mm_max_epi16(vbias2, vbias_min);
vbias3 = _mm_max_epi16(vbias3, vbias_min);
vbias4 = _mm_max_epi16(vbias4, vbias_min);
vbias5 = _mm_max_epi16(vbias5, vbias_min);
vbias6 = _mm_max_epi16(vbias6, vbias_min);
vbias7 = _mm_max_epi16(vbias7, vbias_min);
__m128i vh0 = _mm_and_si128(vnanh, vnanmaskh0);
__m128i vh1 = _mm_and_si128(vnanh, vnanmaskh1);
__m128i vh2 = _mm_and_si128(vnanh, vnanmaskh2);
__m128i vh3 = _mm_and_si128(vnanh, vnanmaskh3);
vf0 = _mm_add_ps(vf0, _mm_castsi128_ps(vbias0));
vf1 = _mm_add_ps(vf1, _mm_castsi128_ps(vbias1));
vf2 = _mm_add_ps(vf2, _mm_castsi128_ps(vbias2));
vf3 = _mm_add_ps(vf3, _mm_castsi128_ps(vbias3));
vf4 = _mm_add_ps(vf4, _mm_castsi128_ps(vbias4));
vf5 = _mm_add_ps(vf5, _mm_castsi128_ps(vbias5));
vf6 = _mm_add_ps(vf6, _mm_castsi128_ps(vbias6));
vf7 = _mm_add_ps(vf7, _mm_castsi128_ps(vbias7));
vh0 = _mm_or_si128(vh0, vsignh0);
vh1 = _mm_or_si128(vh1, vsignh1);
vh2 = _mm_or_si128(vh2, vsignh2);
vh3 = _mm_or_si128(vh3, vsignh3);
__m128i vexpw0 = _mm_srli_epi32(_mm_castps_si128(vf0), 13);
__m128i vexpw1 = _mm_srli_epi32(_mm_castps_si128(vf1), 13);
__m128i vexpw2 = _mm_srli_epi32(_mm_castps_si128(vf2), 13);
__m128i vexpw3 = _mm_srli_epi32(_mm_castps_si128(vf3), 13);
__m128i vexpw4 = _mm_srli_epi32(_mm_castps_si128(vf4), 13);
__m128i vexpw5 = _mm_srli_epi32(_mm_castps_si128(vf5), 13);
__m128i vexpw6 = _mm_srli_epi32(_mm_castps_si128(vf6), 13);
__m128i vexpw7 = _mm_srli_epi32(_mm_castps_si128(vf7), 13);
const __m128i vmantw0 = _mm_and_si128(_mm_castps_si128(vf0), vmanth_mask);
const __m128i vmantw1 = _mm_and_si128(_mm_castps_si128(vf1), vmanth_mask);
const __m128i vmantw2 = _mm_and_si128(_mm_castps_si128(vf2), vmanth_mask);
const __m128i vmantw3 = _mm_and_si128(_mm_castps_si128(vf3), vmanth_mask);
const __m128i vmantw4 = _mm_and_si128(_mm_castps_si128(vf4), vmanth_mask);
const __m128i vmantw5 = _mm_and_si128(_mm_castps_si128(vf5), vmanth_mask);
const __m128i vmantw6 = _mm_and_si128(_mm_castps_si128(vf6), vmanth_mask);
const __m128i vmantw7 = _mm_and_si128(_mm_castps_si128(vf7), vmanth_mask);
vexpw0 = _mm_and_si128(vexpw0, vexph_mask);
vexpw1 = _mm_and_si128(vexpw1, vexph_mask);
vexpw2 = _mm_and_si128(vexpw2, vexph_mask);
vexpw3 = _mm_and_si128(vexpw3, vexph_mask);
vexpw4 = _mm_and_si128(vexpw4, vexph_mask);
vexpw5 = _mm_and_si128(vexpw5, vexph_mask);
vexpw6 = _mm_and_si128(vexpw6, vexph_mask);
vexpw7 = _mm_and_si128(vexpw7, vexph_mask);
const __m128i vnonsignw0 = _mm_add_epi32(vmantw0, vexpw0);
const __m128i vnonsignw1 = _mm_add_epi32(vmantw1, vexpw1);
const __m128i vnonsignw2 = _mm_add_epi32(vmantw2, vexpw2);
const __m128i vnonsignw3 = _mm_add_epi32(vmantw3, vexpw3);
const __m128i vnonsignw4 = _mm_add_epi32(vmantw4, vexpw4);
const __m128i vnonsignw5 = _mm_add_epi32(vmantw5, vexpw5);
const __m128i vnonsignw6 = _mm_add_epi32(vmantw6, vexpw6);
const __m128i vnonsignw7 = _mm_add_epi32(vmantw7, vexpw7);
const __m128i vnonsignh0 = _mm_packs_epi32(vnonsignw0, vnonsignw1);
const __m128i vnonsignh1 = _mm_packs_epi32(vnonsignw2, vnonsignw3);
const __m128i vnonsignh2 = _mm_packs_epi32(vnonsignw4, vnonsignw5);
const __m128i vnonsignh3 = _mm_packs_epi32(vnonsignw6, vnonsignw7);
vh0 = _mm_or_si128(vh0, _mm_andnot_si128(vnanmaskh0, vnonsignh0));
vh1 = _mm_or_si128(vh1, _mm_andnot_si128(vnanmaskh1, vnonsignh1));
vh2 = _mm_or_si128(vh2, _mm_andnot_si128(vnanmaskh2, vnonsignh2));
vh3 = _mm_or_si128(vh3, _mm_andnot_si128(vnanmaskh3, vnonsignh3));
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
o += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx_lo = _mm_loadu_ps(input);
const __m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
__m128i vh = _mm_and_si128(vnanh, vnanmaskh);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
vh = _mm_or_si128(vh, vsignh);
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
vh = _mm_or_si128(vh, _mm_andnot_si128(vnanmaskh, vnonsignh));
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const __m128 vx_lo = _mm_loadu_ps(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const __m128 vx_hi = _mm_loadu_ps(input_hi);
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
__m128i vh = _mm_and_si128(vnanh, vnanmaskh);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
vh = _mm_or_si128(vh, vsignh);
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
vh = _mm_or_si128(vh, _mm_andnot_si128(vnanmaskh, vnonsignh));
if (batch & (4 * sizeof(float))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
*o = (uint16_t) _mm_cvtsi128_si32(vh);
}
}
}
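// Editorial sketch of the conversion scheme used above (not part of the
// generated file; the constant values live in params and their roles are
// inferred from the usual FP16 bit-trick formulation):
//   1. nonsign_mask splits |x| from its sign bit; the sign is OR-ed back at
//      the very end.
//   2. Multiplying |x| by scale_to_inf and then by scale_to_zero saturates
//      inputs beyond the binary16 range to +inf while rescaling the rest so
//      that a single FP addition can perform the rounding.
//   3. vbias is built from the integer bits of |x| (exp_bias added, masked to
//      the exponent field, clamped from below by bias_min); adding it as a
//      float rounds the mantissa to half precision (round-to-nearest-even)
//      and leaves the result's exponent and mantissa in the low bits of vf.
//   4. (vf >> 13) & exph_mask plus vf & manth_mask form the sign-free half
//      bits; a carry out of the mantissa bumps the exponent when rounding
//      overflows.
//   5. Lanes whose |x| bits compare above expw_max (presumably the bit
//      pattern of +infinity) are NaN and are forced to the canonical half
//      NaN (nanh); _mm_packs_epi32 then narrows pairs of 32-bit vectors into
//      8x16-bit outputs.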
| 14,876 | 45.930599 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-sse2-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>

void xnn_f32_f16_vcvt_ukernel__sse2_x8(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);
uint16_t* o = (uint16_t*) output;
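  // Main loop: convert 8 floats per iteration as two 4-wide halves that are
  // merged into a single vector of 8 half-precision values.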
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx_lo = _mm_loadu_ps(input);
const __m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
__m128i vh = _mm_and_si128(vnanh, vnanmaskh);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
vh = _mm_or_si128(vh, vsignh);
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
vh = _mm_or_si128(vh, _mm_andnot_si128(vnanmaskh, vnonsignh));
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
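  // Remainder of 1-7 floats: two full 4-wide loads are still issued (the
  // second offset by batch & (4 * sizeof(float)) bytes), so up to three
  // floats past the end of the buffer may be read; this relies on the
  // XNN_OOB_READS contract of the ukernel. Only valid elements are stored.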
if XNN_UNPREDICTABLE(batch != 0) {
const __m128 vx_lo = _mm_loadu_ps(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const __m128 vx_hi = _mm_loadu_ps(input_hi);
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
__m128i vh = _mm_and_si128(vnanh, vnanmaskh);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
vh = _mm_or_si128(vh, vsignh);
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
vh = _mm_or_si128(vh, _mm_andnot_si128(vnanmaskh, vnonsignh));
if (batch & (4 * sizeof(float))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
*o = (uint16_t) _mm_cvtsi128_si32(vh);
}
}
}
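// Editorial note on the calling convention (an interpretation, not part of the
// generated file): `batch` is a byte count that must be a non-zero multiple of
// sizeof(float), `output` points to uint16_t storage for the IEEE binary16
// results, and `params` must be pre-initialized with the SSE2 constant set
// (in current XNNPACK trees this is done by a helper along the lines of
// xnn_init_f32_f16_cvt_sse2_params; the exact name is assumed here).
// A hypothetical call converting 100 floats would look like:
//   xnn_f32_f16_vcvt_ukernel__sse2_x8(100 * sizeof(float), src, dst, &params);
// provided `src` may be over-read by a few elements (XNN_OOB_READS).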
| 6,455 | 41.196078 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-sse41-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>

void xnn_f32_f16_vcvt_ukernel__sse41_x16(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vx0 = _mm_loadu_ps(input);
const __m128 vx1 = _mm_loadu_ps(input + 4);
const __m128 vx2 = _mm_loadu_ps(input + 8);
const __m128 vx3 = _mm_loadu_ps(input + 12);
input += 16;
const __m128 vabsx0 = _mm_and_ps(vx0, vnonsign_mask);
const __m128 vabsx1 = _mm_and_ps(vx1, vnonsign_mask);
const __m128 vabsx2 = _mm_and_ps(vx2, vnonsign_mask);
const __m128 vabsx3 = _mm_and_ps(vx3, vnonsign_mask);
const __m128 vsignx0 = _mm_xor_ps(vx0, vabsx0);
const __m128 vsignx1 = _mm_xor_ps(vx1, vabsx1);
const __m128 vsignx2 = _mm_xor_ps(vx2, vabsx2);
const __m128 vsignx3 = _mm_xor_ps(vx3, vabsx3);
__m128i vbias0 = _mm_add_epi32(_mm_castps_si128(vabsx0), vexp_bias);
__m128i vbias1 = _mm_add_epi32(_mm_castps_si128(vabsx1), vexp_bias);
__m128i vbias2 = _mm_add_epi32(_mm_castps_si128(vabsx2), vexp_bias);
__m128i vbias3 = _mm_add_epi32(_mm_castps_si128(vabsx3), vexp_bias);
__m128 vf0 = _mm_mul_ps(vabsx0, vscale_to_inf);
__m128 vf1 = _mm_mul_ps(vabsx1, vscale_to_inf);
__m128 vf2 = _mm_mul_ps(vabsx2, vscale_to_inf);
__m128 vf3 = _mm_mul_ps(vabsx3, vscale_to_inf);
const __m128i vnanmaskw0 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx0), vexpw_max);
const __m128i vnanmaskw1 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx1), vexpw_max);
const __m128i vnanmaskw2 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx2), vexpw_max);
const __m128i vnanmaskw3 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx3), vexpw_max);
vbias0 = _mm_and_si128(vbias0, vexpw_max);
vbias1 = _mm_and_si128(vbias1, vexpw_max);
vbias2 = _mm_and_si128(vbias2, vexpw_max);
vbias3 = _mm_and_si128(vbias3, vexpw_max);
vf0 = _mm_mul_ps(vf0, vscale_to_zero);
vf1 = _mm_mul_ps(vf1, vscale_to_zero);
vf2 = _mm_mul_ps(vf2, vscale_to_zero);
vf3 = _mm_mul_ps(vf3, vscale_to_zero);
const __m128i vnanmaskh0 = _mm_packs_epi32(vnanmaskw0, vnanmaskw1);
const __m128i vnanmaskh1 = _mm_packs_epi32(vnanmaskw2, vnanmaskw3);
const __m128i vsignh0 = _mm_packs_epi32(_mm_castps_si128(vsignx0), _mm_castps_si128(vsignx1));
const __m128i vsignh1 = _mm_packs_epi32(_mm_castps_si128(vsignx2), _mm_castps_si128(vsignx3));
vbias0 = _mm_max_epi16(vbias0, vbias_min);
vbias1 = _mm_max_epi16(vbias1, vbias_min);
vbias2 = _mm_max_epi16(vbias2, vbias_min);
vbias3 = _mm_max_epi16(vbias3, vbias_min);
vf0 = _mm_add_ps(vf0, _mm_castsi128_ps(vbias0));
vf1 = _mm_add_ps(vf1, _mm_castsi128_ps(vbias1));
vf2 = _mm_add_ps(vf2, _mm_castsi128_ps(vbias2));
vf3 = _mm_add_ps(vf3, _mm_castsi128_ps(vbias3));
__m128i vexpw0 = _mm_srli_epi32(_mm_castps_si128(vf0), 13);
__m128i vexpw1 = _mm_srli_epi32(_mm_castps_si128(vf1), 13);
__m128i vexpw2 = _mm_srli_epi32(_mm_castps_si128(vf2), 13);
__m128i vexpw3 = _mm_srli_epi32(_mm_castps_si128(vf3), 13);
const __m128i vmantw0 = _mm_and_si128(_mm_castps_si128(vf0), vmanth_mask);
const __m128i vmantw1 = _mm_and_si128(_mm_castps_si128(vf1), vmanth_mask);
const __m128i vmantw2 = _mm_and_si128(_mm_castps_si128(vf2), vmanth_mask);
const __m128i vmantw3 = _mm_and_si128(_mm_castps_si128(vf3), vmanth_mask);
vexpw0 = _mm_and_si128(vexpw0, vexph_mask);
vexpw1 = _mm_and_si128(vexpw1, vexph_mask);
vexpw2 = _mm_and_si128(vexpw2, vexph_mask);
vexpw3 = _mm_and_si128(vexpw3, vexph_mask);
const __m128i vnonsignw0 = _mm_add_epi32(vmantw0, vexpw0);
const __m128i vnonsignw1 = _mm_add_epi32(vmantw1, vexpw1);
const __m128i vnonsignw2 = _mm_add_epi32(vmantw2, vexpw2);
const __m128i vnonsignw3 = _mm_add_epi32(vmantw3, vexpw3);
const __m128i vnonsignh0 = _mm_packs_epi32(vnonsignw0, vnonsignw1);
const __m128i vnonsignh1 = _mm_packs_epi32(vnonsignw2, vnonsignw3);
const __m128i vabsh0 = _mm_blendv_epi8(vnonsignh0, vnanh, vnanmaskh0);
const __m128i vabsh1 = _mm_blendv_epi8(vnonsignh1, vnanh, vnanmaskh1);
const __m128i vh0 = _mm_or_si128(vabsh0, vsignh0);
const __m128i vh1 = _mm_or_si128(vabsh1, vsignh1);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
o += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx_lo = _mm_loadu_ps(input);
const __m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
const __m128i vh = _mm_or_si128(vabsh, vsignh);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const __m128 vx_lo = _mm_loadu_ps(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const __m128 vx_hi = _mm_loadu_ps(input_hi);
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
__m128i vh = _mm_or_si128(vabsh, vsignh);
if (batch & (4 * sizeof(float))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
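// Editorial note: the SSE4.1 variants differ from the SSE2 ones mainly in how
// NaN lanes are selected: a single _mm_blendv_epi8 picks between the packed
// non-sign bits and the canonical half NaN, where the SSE2 kernels emulate the
// blend with an and/andnot/or sequence keyed on the same vnanmaskh. The
// single-element tail store also uses _mm_extract_epi16 instead of
// _mm_cvtsi128_si32.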
| 10,603 | 42.105691 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-sse41-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>

void xnn_f32_f16_vcvt_ukernel__sse41_x24(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);
uint16_t* o = (uint16_t*) output;
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m128 vx0 = _mm_loadu_ps(input);
const __m128 vx1 = _mm_loadu_ps(input + 4);
const __m128 vx2 = _mm_loadu_ps(input + 8);
const __m128 vx3 = _mm_loadu_ps(input + 12);
const __m128 vx4 = _mm_loadu_ps(input + 16);
const __m128 vx5 = _mm_loadu_ps(input + 20);
input += 24;
const __m128 vabsx0 = _mm_and_ps(vx0, vnonsign_mask);
const __m128 vabsx1 = _mm_and_ps(vx1, vnonsign_mask);
const __m128 vabsx2 = _mm_and_ps(vx2, vnonsign_mask);
const __m128 vabsx3 = _mm_and_ps(vx3, vnonsign_mask);
const __m128 vabsx4 = _mm_and_ps(vx4, vnonsign_mask);
const __m128 vabsx5 = _mm_and_ps(vx5, vnonsign_mask);
const __m128 vsignx0 = _mm_xor_ps(vx0, vabsx0);
const __m128 vsignx1 = _mm_xor_ps(vx1, vabsx1);
const __m128 vsignx2 = _mm_xor_ps(vx2, vabsx2);
const __m128 vsignx3 = _mm_xor_ps(vx3, vabsx3);
const __m128 vsignx4 = _mm_xor_ps(vx4, vabsx4);
const __m128 vsignx5 = _mm_xor_ps(vx5, vabsx5);
__m128i vbias0 = _mm_add_epi32(_mm_castps_si128(vabsx0), vexp_bias);
__m128i vbias1 = _mm_add_epi32(_mm_castps_si128(vabsx1), vexp_bias);
__m128i vbias2 = _mm_add_epi32(_mm_castps_si128(vabsx2), vexp_bias);
__m128i vbias3 = _mm_add_epi32(_mm_castps_si128(vabsx3), vexp_bias);
__m128i vbias4 = _mm_add_epi32(_mm_castps_si128(vabsx4), vexp_bias);
__m128i vbias5 = _mm_add_epi32(_mm_castps_si128(vabsx5), vexp_bias);
__m128 vf0 = _mm_mul_ps(vabsx0, vscale_to_inf);
__m128 vf1 = _mm_mul_ps(vabsx1, vscale_to_inf);
__m128 vf2 = _mm_mul_ps(vabsx2, vscale_to_inf);
__m128 vf3 = _mm_mul_ps(vabsx3, vscale_to_inf);
__m128 vf4 = _mm_mul_ps(vabsx4, vscale_to_inf);
__m128 vf5 = _mm_mul_ps(vabsx5, vscale_to_inf);
const __m128i vnanmaskw0 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx0), vexpw_max);
const __m128i vnanmaskw1 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx1), vexpw_max);
const __m128i vnanmaskw2 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx2), vexpw_max);
const __m128i vnanmaskw3 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx3), vexpw_max);
const __m128i vnanmaskw4 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx4), vexpw_max);
const __m128i vnanmaskw5 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx5), vexpw_max);
vbias0 = _mm_and_si128(vbias0, vexpw_max);
vbias1 = _mm_and_si128(vbias1, vexpw_max);
vbias2 = _mm_and_si128(vbias2, vexpw_max);
vbias3 = _mm_and_si128(vbias3, vexpw_max);
vbias4 = _mm_and_si128(vbias4, vexpw_max);
vbias5 = _mm_and_si128(vbias5, vexpw_max);
vf0 = _mm_mul_ps(vf0, vscale_to_zero);
vf1 = _mm_mul_ps(vf1, vscale_to_zero);
vf2 = _mm_mul_ps(vf2, vscale_to_zero);
vf3 = _mm_mul_ps(vf3, vscale_to_zero);
vf4 = _mm_mul_ps(vf4, vscale_to_zero);
vf5 = _mm_mul_ps(vf5, vscale_to_zero);
const __m128i vnanmaskh0 = _mm_packs_epi32(vnanmaskw0, vnanmaskw1);
const __m128i vnanmaskh1 = _mm_packs_epi32(vnanmaskw2, vnanmaskw3);
const __m128i vnanmaskh2 = _mm_packs_epi32(vnanmaskw4, vnanmaskw5);
const __m128i vsignh0 = _mm_packs_epi32(_mm_castps_si128(vsignx0), _mm_castps_si128(vsignx1));
const __m128i vsignh1 = _mm_packs_epi32(_mm_castps_si128(vsignx2), _mm_castps_si128(vsignx3));
const __m128i vsignh2 = _mm_packs_epi32(_mm_castps_si128(vsignx4), _mm_castps_si128(vsignx5));
vbias0 = _mm_max_epi16(vbias0, vbias_min);
vbias1 = _mm_max_epi16(vbias1, vbias_min);
vbias2 = _mm_max_epi16(vbias2, vbias_min);
vbias3 = _mm_max_epi16(vbias3, vbias_min);
vbias4 = _mm_max_epi16(vbias4, vbias_min);
vbias5 = _mm_max_epi16(vbias5, vbias_min);
vf0 = _mm_add_ps(vf0, _mm_castsi128_ps(vbias0));
vf1 = _mm_add_ps(vf1, _mm_castsi128_ps(vbias1));
vf2 = _mm_add_ps(vf2, _mm_castsi128_ps(vbias2));
vf3 = _mm_add_ps(vf3, _mm_castsi128_ps(vbias3));
vf4 = _mm_add_ps(vf4, _mm_castsi128_ps(vbias4));
vf5 = _mm_add_ps(vf5, _mm_castsi128_ps(vbias5));
__m128i vexpw0 = _mm_srli_epi32(_mm_castps_si128(vf0), 13);
__m128i vexpw1 = _mm_srli_epi32(_mm_castps_si128(vf1), 13);
__m128i vexpw2 = _mm_srli_epi32(_mm_castps_si128(vf2), 13);
__m128i vexpw3 = _mm_srli_epi32(_mm_castps_si128(vf3), 13);
__m128i vexpw4 = _mm_srli_epi32(_mm_castps_si128(vf4), 13);
__m128i vexpw5 = _mm_srli_epi32(_mm_castps_si128(vf5), 13);
const __m128i vmantw0 = _mm_and_si128(_mm_castps_si128(vf0), vmanth_mask);
const __m128i vmantw1 = _mm_and_si128(_mm_castps_si128(vf1), vmanth_mask);
const __m128i vmantw2 = _mm_and_si128(_mm_castps_si128(vf2), vmanth_mask);
const __m128i vmantw3 = _mm_and_si128(_mm_castps_si128(vf3), vmanth_mask);
const __m128i vmantw4 = _mm_and_si128(_mm_castps_si128(vf4), vmanth_mask);
const __m128i vmantw5 = _mm_and_si128(_mm_castps_si128(vf5), vmanth_mask);
vexpw0 = _mm_and_si128(vexpw0, vexph_mask);
vexpw1 = _mm_and_si128(vexpw1, vexph_mask);
vexpw2 = _mm_and_si128(vexpw2, vexph_mask);
vexpw3 = _mm_and_si128(vexpw3, vexph_mask);
vexpw4 = _mm_and_si128(vexpw4, vexph_mask);
vexpw5 = _mm_and_si128(vexpw5, vexph_mask);
const __m128i vnonsignw0 = _mm_add_epi32(vmantw0, vexpw0);
const __m128i vnonsignw1 = _mm_add_epi32(vmantw1, vexpw1);
const __m128i vnonsignw2 = _mm_add_epi32(vmantw2, vexpw2);
const __m128i vnonsignw3 = _mm_add_epi32(vmantw3, vexpw3);
const __m128i vnonsignw4 = _mm_add_epi32(vmantw4, vexpw4);
const __m128i vnonsignw5 = _mm_add_epi32(vmantw5, vexpw5);
const __m128i vnonsignh0 = _mm_packs_epi32(vnonsignw0, vnonsignw1);
const __m128i vnonsignh1 = _mm_packs_epi32(vnonsignw2, vnonsignw3);
const __m128i vnonsignh2 = _mm_packs_epi32(vnonsignw4, vnonsignw5);
const __m128i vabsh0 = _mm_blendv_epi8(vnonsignh0, vnanh, vnanmaskh0);
const __m128i vabsh1 = _mm_blendv_epi8(vnonsignh1, vnanh, vnanmaskh1);
const __m128i vabsh2 = _mm_blendv_epi8(vnonsignh2, vnanh, vnanmaskh2);
const __m128i vh0 = _mm_or_si128(vabsh0, vsignh0);
const __m128i vh1 = _mm_or_si128(vabsh1, vsignh1);
const __m128i vh2 = _mm_or_si128(vabsh2, vsignh2);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
o += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx_lo = _mm_loadu_ps(input);
const __m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
const __m128i vh = _mm_or_si128(vabsh, vsignh);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const __m128 vx_lo = _mm_loadu_ps(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const __m128 vx_hi = _mm_loadu_ps(input_hi);
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
__m128i vh = _mm_or_si128(vabsh, vsignh);
if (batch & (4 * sizeof(float))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
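// Editorial note: the x8/x16/x24/x32 suffixes in this family give the number
// of floats converted per main-loop iteration. Every variant shares the same
// shape: an unrolled primary loop, an 8-element secondary loop (which in the
// x8 variant is the primary loop), and a common 1-7 element epilogue, so the
// larger unrolls trade code size for fewer loop iterations on long batches.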
| 12,650 | 44.182143 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-sse41-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>

void xnn_f32_f16_vcvt_ukernel__sse41_x32(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);
uint16_t* o = (uint16_t*) output;
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m128 vx0 = _mm_loadu_ps(input);
const __m128 vx1 = _mm_loadu_ps(input + 4);
const __m128 vx2 = _mm_loadu_ps(input + 8);
const __m128 vx3 = _mm_loadu_ps(input + 12);
const __m128 vx4 = _mm_loadu_ps(input + 16);
const __m128 vx5 = _mm_loadu_ps(input + 20);
const __m128 vx6 = _mm_loadu_ps(input + 24);
const __m128 vx7 = _mm_loadu_ps(input + 28);
input += 32;
const __m128 vabsx0 = _mm_and_ps(vx0, vnonsign_mask);
const __m128 vabsx1 = _mm_and_ps(vx1, vnonsign_mask);
const __m128 vabsx2 = _mm_and_ps(vx2, vnonsign_mask);
const __m128 vabsx3 = _mm_and_ps(vx3, vnonsign_mask);
const __m128 vabsx4 = _mm_and_ps(vx4, vnonsign_mask);
const __m128 vabsx5 = _mm_and_ps(vx5, vnonsign_mask);
const __m128 vabsx6 = _mm_and_ps(vx6, vnonsign_mask);
const __m128 vabsx7 = _mm_and_ps(vx7, vnonsign_mask);
const __m128 vsignx0 = _mm_xor_ps(vx0, vabsx0);
const __m128 vsignx1 = _mm_xor_ps(vx1, vabsx1);
const __m128 vsignx2 = _mm_xor_ps(vx2, vabsx2);
const __m128 vsignx3 = _mm_xor_ps(vx3, vabsx3);
const __m128 vsignx4 = _mm_xor_ps(vx4, vabsx4);
const __m128 vsignx5 = _mm_xor_ps(vx5, vabsx5);
const __m128 vsignx6 = _mm_xor_ps(vx6, vabsx6);
const __m128 vsignx7 = _mm_xor_ps(vx7, vabsx7);
__m128i vbias0 = _mm_add_epi32(_mm_castps_si128(vabsx0), vexp_bias);
__m128i vbias1 = _mm_add_epi32(_mm_castps_si128(vabsx1), vexp_bias);
__m128i vbias2 = _mm_add_epi32(_mm_castps_si128(vabsx2), vexp_bias);
__m128i vbias3 = _mm_add_epi32(_mm_castps_si128(vabsx3), vexp_bias);
__m128i vbias4 = _mm_add_epi32(_mm_castps_si128(vabsx4), vexp_bias);
__m128i vbias5 = _mm_add_epi32(_mm_castps_si128(vabsx5), vexp_bias);
__m128i vbias6 = _mm_add_epi32(_mm_castps_si128(vabsx6), vexp_bias);
__m128i vbias7 = _mm_add_epi32(_mm_castps_si128(vabsx7), vexp_bias);
__m128 vf0 = _mm_mul_ps(vabsx0, vscale_to_inf);
__m128 vf1 = _mm_mul_ps(vabsx1, vscale_to_inf);
__m128 vf2 = _mm_mul_ps(vabsx2, vscale_to_inf);
__m128 vf3 = _mm_mul_ps(vabsx3, vscale_to_inf);
__m128 vf4 = _mm_mul_ps(vabsx4, vscale_to_inf);
__m128 vf5 = _mm_mul_ps(vabsx5, vscale_to_inf);
__m128 vf6 = _mm_mul_ps(vabsx6, vscale_to_inf);
__m128 vf7 = _mm_mul_ps(vabsx7, vscale_to_inf);
const __m128i vnanmaskw0 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx0), vexpw_max);
const __m128i vnanmaskw1 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx1), vexpw_max);
const __m128i vnanmaskw2 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx2), vexpw_max);
const __m128i vnanmaskw3 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx3), vexpw_max);
const __m128i vnanmaskw4 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx4), vexpw_max);
const __m128i vnanmaskw5 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx5), vexpw_max);
const __m128i vnanmaskw6 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx6), vexpw_max);
const __m128i vnanmaskw7 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx7), vexpw_max);
vbias0 = _mm_and_si128(vbias0, vexpw_max);
vbias1 = _mm_and_si128(vbias1, vexpw_max);
vbias2 = _mm_and_si128(vbias2, vexpw_max);
vbias3 = _mm_and_si128(vbias3, vexpw_max);
vbias4 = _mm_and_si128(vbias4, vexpw_max);
vbias5 = _mm_and_si128(vbias5, vexpw_max);
vbias6 = _mm_and_si128(vbias6, vexpw_max);
vbias7 = _mm_and_si128(vbias7, vexpw_max);
vf0 = _mm_mul_ps(vf0, vscale_to_zero);
vf1 = _mm_mul_ps(vf1, vscale_to_zero);
vf2 = _mm_mul_ps(vf2, vscale_to_zero);
vf3 = _mm_mul_ps(vf3, vscale_to_zero);
vf4 = _mm_mul_ps(vf4, vscale_to_zero);
vf5 = _mm_mul_ps(vf5, vscale_to_zero);
vf6 = _mm_mul_ps(vf6, vscale_to_zero);
vf7 = _mm_mul_ps(vf7, vscale_to_zero);
const __m128i vnanmaskh0 = _mm_packs_epi32(vnanmaskw0, vnanmaskw1);
const __m128i vnanmaskh1 = _mm_packs_epi32(vnanmaskw2, vnanmaskw3);
const __m128i vnanmaskh2 = _mm_packs_epi32(vnanmaskw4, vnanmaskw5);
const __m128i vnanmaskh3 = _mm_packs_epi32(vnanmaskw6, vnanmaskw7);
const __m128i vsignh0 = _mm_packs_epi32(_mm_castps_si128(vsignx0), _mm_castps_si128(vsignx1));
const __m128i vsignh1 = _mm_packs_epi32(_mm_castps_si128(vsignx2), _mm_castps_si128(vsignx3));
const __m128i vsignh2 = _mm_packs_epi32(_mm_castps_si128(vsignx4), _mm_castps_si128(vsignx5));
const __m128i vsignh3 = _mm_packs_epi32(_mm_castps_si128(vsignx6), _mm_castps_si128(vsignx7));
vbias0 = _mm_max_epi16(vbias0, vbias_min);
vbias1 = _mm_max_epi16(vbias1, vbias_min);
vbias2 = _mm_max_epi16(vbias2, vbias_min);
vbias3 = _mm_max_epi16(vbias3, vbias_min);
vbias4 = _mm_max_epi16(vbias4, vbias_min);
vbias5 = _mm_max_epi16(vbias5, vbias_min);
vbias6 = _mm_max_epi16(vbias6, vbias_min);
vbias7 = _mm_max_epi16(vbias7, vbias_min);
vf0 = _mm_add_ps(vf0, _mm_castsi128_ps(vbias0));
vf1 = _mm_add_ps(vf1, _mm_castsi128_ps(vbias1));
vf2 = _mm_add_ps(vf2, _mm_castsi128_ps(vbias2));
vf3 = _mm_add_ps(vf3, _mm_castsi128_ps(vbias3));
vf4 = _mm_add_ps(vf4, _mm_castsi128_ps(vbias4));
vf5 = _mm_add_ps(vf5, _mm_castsi128_ps(vbias5));
vf6 = _mm_add_ps(vf6, _mm_castsi128_ps(vbias6));
vf7 = _mm_add_ps(vf7, _mm_castsi128_ps(vbias7));
__m128i vexpw0 = _mm_srli_epi32(_mm_castps_si128(vf0), 13);
__m128i vexpw1 = _mm_srli_epi32(_mm_castps_si128(vf1), 13);
__m128i vexpw2 = _mm_srli_epi32(_mm_castps_si128(vf2), 13);
__m128i vexpw3 = _mm_srli_epi32(_mm_castps_si128(vf3), 13);
__m128i vexpw4 = _mm_srli_epi32(_mm_castps_si128(vf4), 13);
__m128i vexpw5 = _mm_srli_epi32(_mm_castps_si128(vf5), 13);
__m128i vexpw6 = _mm_srli_epi32(_mm_castps_si128(vf6), 13);
__m128i vexpw7 = _mm_srli_epi32(_mm_castps_si128(vf7), 13);
const __m128i vmantw0 = _mm_and_si128(_mm_castps_si128(vf0), vmanth_mask);
const __m128i vmantw1 = _mm_and_si128(_mm_castps_si128(vf1), vmanth_mask);
const __m128i vmantw2 = _mm_and_si128(_mm_castps_si128(vf2), vmanth_mask);
const __m128i vmantw3 = _mm_and_si128(_mm_castps_si128(vf3), vmanth_mask);
const __m128i vmantw4 = _mm_and_si128(_mm_castps_si128(vf4), vmanth_mask);
const __m128i vmantw5 = _mm_and_si128(_mm_castps_si128(vf5), vmanth_mask);
const __m128i vmantw6 = _mm_and_si128(_mm_castps_si128(vf6), vmanth_mask);
const __m128i vmantw7 = _mm_and_si128(_mm_castps_si128(vf7), vmanth_mask);
vexpw0 = _mm_and_si128(vexpw0, vexph_mask);
vexpw1 = _mm_and_si128(vexpw1, vexph_mask);
vexpw2 = _mm_and_si128(vexpw2, vexph_mask);
vexpw3 = _mm_and_si128(vexpw3, vexph_mask);
vexpw4 = _mm_and_si128(vexpw4, vexph_mask);
vexpw5 = _mm_and_si128(vexpw5, vexph_mask);
vexpw6 = _mm_and_si128(vexpw6, vexph_mask);
vexpw7 = _mm_and_si128(vexpw7, vexph_mask);
const __m128i vnonsignw0 = _mm_add_epi32(vmantw0, vexpw0);
const __m128i vnonsignw1 = _mm_add_epi32(vmantw1, vexpw1);
const __m128i vnonsignw2 = _mm_add_epi32(vmantw2, vexpw2);
const __m128i vnonsignw3 = _mm_add_epi32(vmantw3, vexpw3);
const __m128i vnonsignw4 = _mm_add_epi32(vmantw4, vexpw4);
const __m128i vnonsignw5 = _mm_add_epi32(vmantw5, vexpw5);
const __m128i vnonsignw6 = _mm_add_epi32(vmantw6, vexpw6);
const __m128i vnonsignw7 = _mm_add_epi32(vmantw7, vexpw7);
const __m128i vnonsignh0 = _mm_packs_epi32(vnonsignw0, vnonsignw1);
const __m128i vnonsignh1 = _mm_packs_epi32(vnonsignw2, vnonsignw3);
const __m128i vnonsignh2 = _mm_packs_epi32(vnonsignw4, vnonsignw5);
const __m128i vnonsignh3 = _mm_packs_epi32(vnonsignw6, vnonsignw7);
const __m128i vabsh0 = _mm_blendv_epi8(vnonsignh0, vnanh, vnanmaskh0);
const __m128i vabsh1 = _mm_blendv_epi8(vnonsignh1, vnanh, vnanmaskh1);
const __m128i vabsh2 = _mm_blendv_epi8(vnonsignh2, vnanh, vnanmaskh2);
const __m128i vabsh3 = _mm_blendv_epi8(vnonsignh3, vnanh, vnanmaskh3);
const __m128i vh0 = _mm_or_si128(vabsh0, vsignh0);
const __m128i vh1 = _mm_or_si128(vabsh1, vsignh1);
const __m128i vh2 = _mm_or_si128(vabsh2, vsignh2);
const __m128i vh3 = _mm_or_si128(vabsh3, vsignh3);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
o += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx_lo = _mm_loadu_ps(input);
const __m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
const __m128i vh = _mm_or_si128(vabsh, vsignh);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const __m128 vx_lo = _mm_loadu_ps(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const __m128 vx_hi = _mm_loadu_ps(input_hi);
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
__m128i vh = _mm_or_si128(vabsh, vsignh);
if (batch & (4 * sizeof(float))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
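// Editorial note: _mm_packs_epi32 is a signed saturating pack, and the kernels
// lean on that: a 32-bit sign word of 0x80000000 saturates to exactly 0x8000,
// the binary16 sign bit; an all-ones NaN mask survives as all-ones; and for
// non-NaN lanes the non-sign result is at most 0x7C00, so it is never clipped.
// NaN lanes may saturate during the pack, but they are overwritten with nanh.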
| 14,697 | 45.808917 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-sse41-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>

void xnn_f32_f16_vcvt_ukernel__sse41_x8(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx_lo = _mm_loadu_ps(input);
const __m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
const __m128i vh = _mm_or_si128(vabsh, vsignh);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const __m128 vx_lo = _mm_loadu_ps(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const __m128 vx_hi = _mm_loadu_ps(input_hi);
const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
__m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
__m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
__m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
__m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
__m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
__m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
__m128i vh = _mm_or_si128(vabsh, vsignh);
if (batch & (4 * sizeof(float))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
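/*
 * A minimal scalar sketch of the same scale_to_inf / scale_to_zero / bias rounding trick that
 * the vector kernel above applies 8 floats at a time. It follows the widely used scalar
 * fp32->fp16 routine; the helper names and the hard-coded masks (0x7C00 exponent, 0x0FFF
 * mantissa, 0x7E00 canonical NaN) are illustrative assumptions, not values read out of the
 * params struct that the kernel loads.
 */
#include <math.h>
#include <stdint.h>
#include <string.h>

static inline uint32_t f32_to_bits_sketch(float f) {
  uint32_t w;
  memcpy(&w, &f, sizeof(w));  /* bit-cast without violating strict aliasing */
  return w;
}

static inline float f32_from_bits_sketch(uint32_t w) {
  float f;
  memcpy(&f, &w, sizeof(f));
  return f;
}

static inline uint16_t f32_to_f16_scalar_sketch(float x) {
  const float scale_to_inf = 0x1.0p+112f;   /* values too large for f16 overflow to f32 +inf */
  const float scale_to_zero = 0x1.0p-110f;  /* rescale back so finite values stay representable */
  float base = (fabsf(x) * scale_to_inf) * scale_to_zero;

  const uint32_t w = f32_to_bits_sketch(x);
  const uint32_t shl1_w = w + w;                  /* shifts out the sign bit */
  const uint32_t sign = w & UINT32_C(0x80000000);
  uint32_t bias = shl1_w & UINT32_C(0xFF000000);  /* exponent field, left-shifted by one */
  if (bias < UINT32_C(0x71000000)) {
    bias = UINT32_C(0x71000000);                  /* clamp, analogous to the vbias_min max above */
  }

  /* Adding the rebiased exponent as a float rounds the mantissa at the f16 position (bit 13). */
  base = f32_from_bits_sketch((bias >> 1) + UINT32_C(0x07800000)) + base;
  const uint32_t bits = f32_to_bits_sketch(base);
  const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);  /* cf. vexph_mask */
  const uint32_t mant_bits = bits & UINT32_C(0x00000FFF);         /* cf. vmanth_mask */
  const uint32_t nonsign = exp_bits + mant_bits;
  /* NaN inputs (exponent all ones, nonzero mantissa) map to a canonical f16 NaN, cf. vnanh. */
  return (uint16_t) ((sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT32_C(0x7E00) : nonsign));
}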
| 6,399 | 40.830065 | 101 | c |
| XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-wasmrelaxedsimd-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__wasmrelaxedsimd_x16(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vexp_bias = wasm_v128_load64_splat(params->wasmsimd.exp_bias);
const v128_t vscale_to_inf = wasm_v128_load64_splat(params->wasmsimd.scale_to_inf);
const v128_t vexpw_max = wasm_v128_load64_splat(params->wasmsimd.expw_max);
const v128_t vscale_to_zero = wasm_v128_load64_splat(params->wasmsimd.scale_to_zero);
const v128_t vbias_min = wasm_v128_load64_splat(params->wasmsimd.bias_min);
const v128_t vmanth_mask = wasm_v128_load64_splat(params->wasmsimd.manth_mask);
const v128_t vexph_mask = wasm_v128_load64_splat(params->wasmsimd.exph_mask);
const v128_t vnanh = wasm_v128_load64_splat(params->wasmsimd.nanh);
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0 = wasm_v128_load(input);
const v128_t vx1 = wasm_v128_load(input + 4);
const v128_t vx2 = wasm_v128_load(input + 8);
const v128_t vx3 = wasm_v128_load(input + 12);
input += 16;
const v128_t vabsx0 = wasm_f32x4_abs(vx0);
const v128_t vabsx1 = wasm_f32x4_abs(vx1);
const v128_t vabsx2 = wasm_f32x4_abs(vx2);
const v128_t vabsx3 = wasm_f32x4_abs(vx3);
const v128_t vsignx0 = wasm_v128_xor(vx0, vabsx0);
const v128_t vsignx1 = wasm_v128_xor(vx1, vabsx1);
const v128_t vsignx2 = wasm_v128_xor(vx2, vabsx2);
const v128_t vsignx3 = wasm_v128_xor(vx3, vabsx3);
v128_t vbias0 = wasm_i32x4_add(vabsx0, vexp_bias);
v128_t vbias1 = wasm_i32x4_add(vabsx1, vexp_bias);
v128_t vbias2 = wasm_i32x4_add(vabsx2, vexp_bias);
v128_t vbias3 = wasm_i32x4_add(vabsx3, vexp_bias);
v128_t vf0 = wasm_f32x4_mul(vabsx0, vscale_to_inf);
v128_t vf1 = wasm_f32x4_mul(vabsx1, vscale_to_inf);
v128_t vf2 = wasm_f32x4_mul(vabsx2, vscale_to_inf);
v128_t vf3 = wasm_f32x4_mul(vabsx3, vscale_to_inf);
const v128_t vnanmaskw0 = wasm_i32x4_gt(vabsx0, vexpw_max);
const v128_t vnanmaskw1 = wasm_i32x4_gt(vabsx1, vexpw_max);
const v128_t vnanmaskw2 = wasm_i32x4_gt(vabsx2, vexpw_max);
const v128_t vnanmaskw3 = wasm_i32x4_gt(vabsx3, vexpw_max);
vbias0 = wasm_v128_and(vbias0, vexpw_max);
vbias1 = wasm_v128_and(vbias1, vexpw_max);
vbias2 = wasm_v128_and(vbias2, vexpw_max);
vbias3 = wasm_v128_and(vbias3, vexpw_max);
vf0 = wasm_f32x4_mul(vf0, vscale_to_zero);
vf1 = wasm_f32x4_mul(vf1, vscale_to_zero);
vf2 = wasm_f32x4_mul(vf2, vscale_to_zero);
vf3 = wasm_f32x4_mul(vf3, vscale_to_zero);
const v128_t vnanmaskh0 = wasm_i16x8_narrow_i32x4(vnanmaskw0, vnanmaskw1);
const v128_t vnanmaskh1 = wasm_i16x8_narrow_i32x4(vnanmaskw2, vnanmaskw3);
const v128_t vsignh0 = wasm_i16x8_narrow_i32x4(vsignx0, vsignx1);
const v128_t vsignh1 = wasm_i16x8_narrow_i32x4(vsignx2, vsignx3);
vbias0 = wasm_i16x8_max(vbias0, vbias_min);
vbias1 = wasm_i16x8_max(vbias1, vbias_min);
vbias2 = wasm_i16x8_max(vbias2, vbias_min);
vbias3 = wasm_i16x8_max(vbias3, vbias_min);
vf0 = wasm_f32x4_add(vf0, vbias0);
vf1 = wasm_f32x4_add(vf1, vbias1);
vf2 = wasm_f32x4_add(vf2, vbias2);
vf3 = wasm_f32x4_add(vf3, vbias3);
v128_t vexpw0 = wasm_i32x4_shr(vf0, 13);
v128_t vexpw1 = wasm_i32x4_shr(vf1, 13);
v128_t vexpw2 = wasm_i32x4_shr(vf2, 13);
v128_t vexpw3 = wasm_i32x4_shr(vf3, 13);
const v128_t vmantw0 = wasm_v128_and(vf0, vmanth_mask);
const v128_t vmantw1 = wasm_v128_and(vf1, vmanth_mask);
const v128_t vmantw2 = wasm_v128_and(vf2, vmanth_mask);
const v128_t vmantw3 = wasm_v128_and(vf3, vmanth_mask);
vexpw0 = wasm_v128_and(vexpw0, vexph_mask);
vexpw1 = wasm_v128_and(vexpw1, vexph_mask);
vexpw2 = wasm_v128_and(vexpw2, vexph_mask);
vexpw3 = wasm_v128_and(vexpw3, vexph_mask);
const v128_t vnonsignw0 = wasm_i32x4_add(vmantw0, vexpw0);
const v128_t vnonsignw1 = wasm_i32x4_add(vmantw1, vexpw1);
const v128_t vnonsignw2 = wasm_i32x4_add(vmantw2, vexpw2);
const v128_t vnonsignw3 = wasm_i32x4_add(vmantw3, vexpw3);
const v128_t vnonsignh0 = wasm_i16x8_narrow_i32x4(vnonsignw0, vnonsignw1);
const v128_t vnonsignh1 = wasm_i16x8_narrow_i32x4(vnonsignw2, vnonsignw3);
const v128_t vabsh0 = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh0, vnanmaskh0);
const v128_t vabsh1 = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh1, vnanmaskh1);
const v128_t vh0 = wasm_v128_or(vabsh0, vsignh0);
const v128_t vh1 = wasm_v128_or(vabsh1, vsignh1);
wasm_v128_store(o, vh0);
wasm_v128_store(o + 8, vh1);
o += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx_lo = wasm_v128_load(input);
const v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh, vnanmaskh);
const v128_t vh = wasm_v128_or(vabsh, vsignh);
wasm_v128_store(o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const v128_t vx_lo = wasm_v128_load(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const v128_t vx_hi = wasm_v128_load(input_hi);
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh, vnanmaskh);
v128_t vh = wasm_v128_or(vabsh, vsignh);
if (batch & (4 * sizeof(float))) {
wasm_v128_store64_lane(o, vh, 0);
vh = wasm_v64x2_shuffle(vh, vh, 1, 1);
o += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store32_lane(o, vh, 0);
vh = wasm_i64x2_shr(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store16_lane(o, vh, 0);
}
}
}
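/*
 * On the NaN selection above: __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh,
 * vnanmaskh) keeps the canonical-NaN lanes of vnanh where the mask lane is all-ones and the
 * packed exponent+mantissa lanes of vnonsignh elsewhere. With all-ones/all-zeros mask lanes,
 * which the i32x4_gt compare followed by the signed narrow produces here, the relaxed form is
 * specified to match the strict wasm_v128_bitselect(a, b, c) = (a & c) | (b & ~c) used by the
 * non-relaxed variants of this kernel. A one-lane scalar model, with hypothetical names:
 *
 *   uint16_t absh = is_nan ? nanh : nonsignh;  // nanh is the canonical f16 NaN encoding
 *   uint16_t h    = absh | signh;              // reattach the original sign bit
 */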
| 9,754 | 39.309917 | 97 | c |
| XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-wasmrelaxedsimd-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__wasmrelaxedsimd_x24(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vexp_bias = wasm_v128_load64_splat(params->wasmsimd.exp_bias);
const v128_t vscale_to_inf = wasm_v128_load64_splat(params->wasmsimd.scale_to_inf);
const v128_t vexpw_max = wasm_v128_load64_splat(params->wasmsimd.expw_max);
const v128_t vscale_to_zero = wasm_v128_load64_splat(params->wasmsimd.scale_to_zero);
const v128_t vbias_min = wasm_v128_load64_splat(params->wasmsimd.bias_min);
const v128_t vmanth_mask = wasm_v128_load64_splat(params->wasmsimd.manth_mask);
const v128_t vexph_mask = wasm_v128_load64_splat(params->wasmsimd.exph_mask);
const v128_t vnanh = wasm_v128_load64_splat(params->wasmsimd.nanh);
uint16_t* o = (uint16_t*) output;
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const v128_t vx0 = wasm_v128_load(input);
const v128_t vx1 = wasm_v128_load(input + 4);
const v128_t vx2 = wasm_v128_load(input + 8);
const v128_t vx3 = wasm_v128_load(input + 12);
const v128_t vx4 = wasm_v128_load(input + 16);
const v128_t vx5 = wasm_v128_load(input + 20);
input += 24;
const v128_t vabsx0 = wasm_f32x4_abs(vx0);
const v128_t vabsx1 = wasm_f32x4_abs(vx1);
const v128_t vabsx2 = wasm_f32x4_abs(vx2);
const v128_t vabsx3 = wasm_f32x4_abs(vx3);
const v128_t vabsx4 = wasm_f32x4_abs(vx4);
const v128_t vabsx5 = wasm_f32x4_abs(vx5);
const v128_t vsignx0 = wasm_v128_xor(vx0, vabsx0);
const v128_t vsignx1 = wasm_v128_xor(vx1, vabsx1);
const v128_t vsignx2 = wasm_v128_xor(vx2, vabsx2);
const v128_t vsignx3 = wasm_v128_xor(vx3, vabsx3);
const v128_t vsignx4 = wasm_v128_xor(vx4, vabsx4);
const v128_t vsignx5 = wasm_v128_xor(vx5, vabsx5);
v128_t vbias0 = wasm_i32x4_add(vabsx0, vexp_bias);
v128_t vbias1 = wasm_i32x4_add(vabsx1, vexp_bias);
v128_t vbias2 = wasm_i32x4_add(vabsx2, vexp_bias);
v128_t vbias3 = wasm_i32x4_add(vabsx3, vexp_bias);
v128_t vbias4 = wasm_i32x4_add(vabsx4, vexp_bias);
v128_t vbias5 = wasm_i32x4_add(vabsx5, vexp_bias);
v128_t vf0 = wasm_f32x4_mul(vabsx0, vscale_to_inf);
v128_t vf1 = wasm_f32x4_mul(vabsx1, vscale_to_inf);
v128_t vf2 = wasm_f32x4_mul(vabsx2, vscale_to_inf);
v128_t vf3 = wasm_f32x4_mul(vabsx3, vscale_to_inf);
v128_t vf4 = wasm_f32x4_mul(vabsx4, vscale_to_inf);
v128_t vf5 = wasm_f32x4_mul(vabsx5, vscale_to_inf);
const v128_t vnanmaskw0 = wasm_i32x4_gt(vabsx0, vexpw_max);
const v128_t vnanmaskw1 = wasm_i32x4_gt(vabsx1, vexpw_max);
const v128_t vnanmaskw2 = wasm_i32x4_gt(vabsx2, vexpw_max);
const v128_t vnanmaskw3 = wasm_i32x4_gt(vabsx3, vexpw_max);
const v128_t vnanmaskw4 = wasm_i32x4_gt(vabsx4, vexpw_max);
const v128_t vnanmaskw5 = wasm_i32x4_gt(vabsx5, vexpw_max);
vbias0 = wasm_v128_and(vbias0, vexpw_max);
vbias1 = wasm_v128_and(vbias1, vexpw_max);
vbias2 = wasm_v128_and(vbias2, vexpw_max);
vbias3 = wasm_v128_and(vbias3, vexpw_max);
vbias4 = wasm_v128_and(vbias4, vexpw_max);
vbias5 = wasm_v128_and(vbias5, vexpw_max);
vf0 = wasm_f32x4_mul(vf0, vscale_to_zero);
vf1 = wasm_f32x4_mul(vf1, vscale_to_zero);
vf2 = wasm_f32x4_mul(vf2, vscale_to_zero);
vf3 = wasm_f32x4_mul(vf3, vscale_to_zero);
vf4 = wasm_f32x4_mul(vf4, vscale_to_zero);
vf5 = wasm_f32x4_mul(vf5, vscale_to_zero);
const v128_t vnanmaskh0 = wasm_i16x8_narrow_i32x4(vnanmaskw0, vnanmaskw1);
const v128_t vnanmaskh1 = wasm_i16x8_narrow_i32x4(vnanmaskw2, vnanmaskw3);
const v128_t vnanmaskh2 = wasm_i16x8_narrow_i32x4(vnanmaskw4, vnanmaskw5);
const v128_t vsignh0 = wasm_i16x8_narrow_i32x4(vsignx0, vsignx1);
const v128_t vsignh1 = wasm_i16x8_narrow_i32x4(vsignx2, vsignx3);
const v128_t vsignh2 = wasm_i16x8_narrow_i32x4(vsignx4, vsignx5);
vbias0 = wasm_i16x8_max(vbias0, vbias_min);
vbias1 = wasm_i16x8_max(vbias1, vbias_min);
vbias2 = wasm_i16x8_max(vbias2, vbias_min);
vbias3 = wasm_i16x8_max(vbias3, vbias_min);
vbias4 = wasm_i16x8_max(vbias4, vbias_min);
vbias5 = wasm_i16x8_max(vbias5, vbias_min);
vf0 = wasm_f32x4_add(vf0, vbias0);
vf1 = wasm_f32x4_add(vf1, vbias1);
vf2 = wasm_f32x4_add(vf2, vbias2);
vf3 = wasm_f32x4_add(vf3, vbias3);
vf4 = wasm_f32x4_add(vf4, vbias4);
vf5 = wasm_f32x4_add(vf5, vbias5);
v128_t vexpw0 = wasm_i32x4_shr(vf0, 13);
v128_t vexpw1 = wasm_i32x4_shr(vf1, 13);
v128_t vexpw2 = wasm_i32x4_shr(vf2, 13);
v128_t vexpw3 = wasm_i32x4_shr(vf3, 13);
v128_t vexpw4 = wasm_i32x4_shr(vf4, 13);
v128_t vexpw5 = wasm_i32x4_shr(vf5, 13);
const v128_t vmantw0 = wasm_v128_and(vf0, vmanth_mask);
const v128_t vmantw1 = wasm_v128_and(vf1, vmanth_mask);
const v128_t vmantw2 = wasm_v128_and(vf2, vmanth_mask);
const v128_t vmantw3 = wasm_v128_and(vf3, vmanth_mask);
const v128_t vmantw4 = wasm_v128_and(vf4, vmanth_mask);
const v128_t vmantw5 = wasm_v128_and(vf5, vmanth_mask);
vexpw0 = wasm_v128_and(vexpw0, vexph_mask);
vexpw1 = wasm_v128_and(vexpw1, vexph_mask);
vexpw2 = wasm_v128_and(vexpw2, vexph_mask);
vexpw3 = wasm_v128_and(vexpw3, vexph_mask);
vexpw4 = wasm_v128_and(vexpw4, vexph_mask);
vexpw5 = wasm_v128_and(vexpw5, vexph_mask);
const v128_t vnonsignw0 = wasm_i32x4_add(vmantw0, vexpw0);
const v128_t vnonsignw1 = wasm_i32x4_add(vmantw1, vexpw1);
const v128_t vnonsignw2 = wasm_i32x4_add(vmantw2, vexpw2);
const v128_t vnonsignw3 = wasm_i32x4_add(vmantw3, vexpw3);
const v128_t vnonsignw4 = wasm_i32x4_add(vmantw4, vexpw4);
const v128_t vnonsignw5 = wasm_i32x4_add(vmantw5, vexpw5);
const v128_t vnonsignh0 = wasm_i16x8_narrow_i32x4(vnonsignw0, vnonsignw1);
const v128_t vnonsignh1 = wasm_i16x8_narrow_i32x4(vnonsignw2, vnonsignw3);
const v128_t vnonsignh2 = wasm_i16x8_narrow_i32x4(vnonsignw4, vnonsignw5);
const v128_t vabsh0 = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh0, vnanmaskh0);
const v128_t vabsh1 = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh1, vnanmaskh1);
const v128_t vabsh2 = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh2, vnanmaskh2);
const v128_t vh0 = wasm_v128_or(vabsh0, vsignh0);
const v128_t vh1 = wasm_v128_or(vabsh1, vsignh1);
const v128_t vh2 = wasm_v128_or(vabsh2, vsignh2);
wasm_v128_store(o, vh0);
wasm_v128_store(o + 8, vh1);
wasm_v128_store(o + 16, vh2);
o += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx_lo = wasm_v128_load(input);
const v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh, vnanmaskh);
const v128_t vh = wasm_v128_or(vabsh, vsignh);
wasm_v128_store(o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const v128_t vx_lo = wasm_v128_load(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const v128_t vx_hi = wasm_v128_load(input_hi);
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh, vnanmaskh);
v128_t vh = wasm_v128_or(vabsh, vsignh);
if (batch & (4 * sizeof(float))) {
wasm_v128_store64_lane(o, vh, 0);
vh = wasm_v64x2_shuffle(vh, vh, 1, 1);
o += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store32_lane(o, vh, 0);
vh = wasm_i64x2_shr(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store16_lane(o, vh, 0);
}
}
}
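/*
 * On the partial-batch tail above: the high half is reloaded from
 *   input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))))
 * so with 5..7 floats left it points 4 floats ahead, and with 1..4 left it simply aliases the
 * low half; XNN_OOB_READS marks that the kernel may read past the last element, and only the
 * valid lanes are stored. A scalar model of how the 4/2/1 store branches cover a remainder of
 * n = batch / sizeof(float) converted halves (hypothetical helper, assumes 0 < n < 8):
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static inline void store_tail_model_sketch(uint16_t* o, const uint16_t h[8], size_t n) {
  size_t i = 0;  /* index of the next half to store, mirroring the lane shifts applied to vh */
  if (n & 4) { memcpy(o, h + i, 4 * sizeof(uint16_t)); o += 4; i += 4; }
  if (n & 2) { memcpy(o, h + i, 2 * sizeof(uint16_t)); o += 2; i += 2; }
  if (n & 1) { o[0] = h[i]; }
}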
| 11,618 | 41.097826 | 97 | c |
| XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-wasmrelaxedsimd-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__wasmrelaxedsimd_x32(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vexp_bias = wasm_v128_load64_splat(params->wasmsimd.exp_bias);
const v128_t vscale_to_inf = wasm_v128_load64_splat(params->wasmsimd.scale_to_inf);
const v128_t vexpw_max = wasm_v128_load64_splat(params->wasmsimd.expw_max);
const v128_t vscale_to_zero = wasm_v128_load64_splat(params->wasmsimd.scale_to_zero);
const v128_t vbias_min = wasm_v128_load64_splat(params->wasmsimd.bias_min);
const v128_t vmanth_mask = wasm_v128_load64_splat(params->wasmsimd.manth_mask);
const v128_t vexph_mask = wasm_v128_load64_splat(params->wasmsimd.exph_mask);
const v128_t vnanh = wasm_v128_load64_splat(params->wasmsimd.nanh);
uint16_t* o = (uint16_t*) output;
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const v128_t vx0 = wasm_v128_load(input);
const v128_t vx1 = wasm_v128_load(input + 4);
const v128_t vx2 = wasm_v128_load(input + 8);
const v128_t vx3 = wasm_v128_load(input + 12);
const v128_t vx4 = wasm_v128_load(input + 16);
const v128_t vx5 = wasm_v128_load(input + 20);
const v128_t vx6 = wasm_v128_load(input + 24);
const v128_t vx7 = wasm_v128_load(input + 28);
input += 32;
const v128_t vabsx0 = wasm_f32x4_abs(vx0);
const v128_t vabsx1 = wasm_f32x4_abs(vx1);
const v128_t vabsx2 = wasm_f32x4_abs(vx2);
const v128_t vabsx3 = wasm_f32x4_abs(vx3);
const v128_t vabsx4 = wasm_f32x4_abs(vx4);
const v128_t vabsx5 = wasm_f32x4_abs(vx5);
const v128_t vabsx6 = wasm_f32x4_abs(vx6);
const v128_t vabsx7 = wasm_f32x4_abs(vx7);
const v128_t vsignx0 = wasm_v128_xor(vx0, vabsx0);
const v128_t vsignx1 = wasm_v128_xor(vx1, vabsx1);
const v128_t vsignx2 = wasm_v128_xor(vx2, vabsx2);
const v128_t vsignx3 = wasm_v128_xor(vx3, vabsx3);
const v128_t vsignx4 = wasm_v128_xor(vx4, vabsx4);
const v128_t vsignx5 = wasm_v128_xor(vx5, vabsx5);
const v128_t vsignx6 = wasm_v128_xor(vx6, vabsx6);
const v128_t vsignx7 = wasm_v128_xor(vx7, vabsx7);
v128_t vbias0 = wasm_i32x4_add(vabsx0, vexp_bias);
v128_t vbias1 = wasm_i32x4_add(vabsx1, vexp_bias);
v128_t vbias2 = wasm_i32x4_add(vabsx2, vexp_bias);
v128_t vbias3 = wasm_i32x4_add(vabsx3, vexp_bias);
v128_t vbias4 = wasm_i32x4_add(vabsx4, vexp_bias);
v128_t vbias5 = wasm_i32x4_add(vabsx5, vexp_bias);
v128_t vbias6 = wasm_i32x4_add(vabsx6, vexp_bias);
v128_t vbias7 = wasm_i32x4_add(vabsx7, vexp_bias);
v128_t vf0 = wasm_f32x4_mul(vabsx0, vscale_to_inf);
v128_t vf1 = wasm_f32x4_mul(vabsx1, vscale_to_inf);
v128_t vf2 = wasm_f32x4_mul(vabsx2, vscale_to_inf);
v128_t vf3 = wasm_f32x4_mul(vabsx3, vscale_to_inf);
v128_t vf4 = wasm_f32x4_mul(vabsx4, vscale_to_inf);
v128_t vf5 = wasm_f32x4_mul(vabsx5, vscale_to_inf);
v128_t vf6 = wasm_f32x4_mul(vabsx6, vscale_to_inf);
v128_t vf7 = wasm_f32x4_mul(vabsx7, vscale_to_inf);
const v128_t vnanmaskw0 = wasm_i32x4_gt(vabsx0, vexpw_max);
const v128_t vnanmaskw1 = wasm_i32x4_gt(vabsx1, vexpw_max);
const v128_t vnanmaskw2 = wasm_i32x4_gt(vabsx2, vexpw_max);
const v128_t vnanmaskw3 = wasm_i32x4_gt(vabsx3, vexpw_max);
const v128_t vnanmaskw4 = wasm_i32x4_gt(vabsx4, vexpw_max);
const v128_t vnanmaskw5 = wasm_i32x4_gt(vabsx5, vexpw_max);
const v128_t vnanmaskw6 = wasm_i32x4_gt(vabsx6, vexpw_max);
const v128_t vnanmaskw7 = wasm_i32x4_gt(vabsx7, vexpw_max);
vbias0 = wasm_v128_and(vbias0, vexpw_max);
vbias1 = wasm_v128_and(vbias1, vexpw_max);
vbias2 = wasm_v128_and(vbias2, vexpw_max);
vbias3 = wasm_v128_and(vbias3, vexpw_max);
vbias4 = wasm_v128_and(vbias4, vexpw_max);
vbias5 = wasm_v128_and(vbias5, vexpw_max);
vbias6 = wasm_v128_and(vbias6, vexpw_max);
vbias7 = wasm_v128_and(vbias7, vexpw_max);
vf0 = wasm_f32x4_mul(vf0, vscale_to_zero);
vf1 = wasm_f32x4_mul(vf1, vscale_to_zero);
vf2 = wasm_f32x4_mul(vf2, vscale_to_zero);
vf3 = wasm_f32x4_mul(vf3, vscale_to_zero);
vf4 = wasm_f32x4_mul(vf4, vscale_to_zero);
vf5 = wasm_f32x4_mul(vf5, vscale_to_zero);
vf6 = wasm_f32x4_mul(vf6, vscale_to_zero);
vf7 = wasm_f32x4_mul(vf7, vscale_to_zero);
const v128_t vnanmaskh0 = wasm_i16x8_narrow_i32x4(vnanmaskw0, vnanmaskw1);
const v128_t vnanmaskh1 = wasm_i16x8_narrow_i32x4(vnanmaskw2, vnanmaskw3);
const v128_t vnanmaskh2 = wasm_i16x8_narrow_i32x4(vnanmaskw4, vnanmaskw5);
const v128_t vnanmaskh3 = wasm_i16x8_narrow_i32x4(vnanmaskw6, vnanmaskw7);
const v128_t vsignh0 = wasm_i16x8_narrow_i32x4(vsignx0, vsignx1);
const v128_t vsignh1 = wasm_i16x8_narrow_i32x4(vsignx2, vsignx3);
const v128_t vsignh2 = wasm_i16x8_narrow_i32x4(vsignx4, vsignx5);
const v128_t vsignh3 = wasm_i16x8_narrow_i32x4(vsignx6, vsignx7);
vbias0 = wasm_i16x8_max(vbias0, vbias_min);
vbias1 = wasm_i16x8_max(vbias1, vbias_min);
vbias2 = wasm_i16x8_max(vbias2, vbias_min);
vbias3 = wasm_i16x8_max(vbias3, vbias_min);
vbias4 = wasm_i16x8_max(vbias4, vbias_min);
vbias5 = wasm_i16x8_max(vbias5, vbias_min);
vbias6 = wasm_i16x8_max(vbias6, vbias_min);
vbias7 = wasm_i16x8_max(vbias7, vbias_min);
vf0 = wasm_f32x4_add(vf0, vbias0);
vf1 = wasm_f32x4_add(vf1, vbias1);
vf2 = wasm_f32x4_add(vf2, vbias2);
vf3 = wasm_f32x4_add(vf3, vbias3);
vf4 = wasm_f32x4_add(vf4, vbias4);
vf5 = wasm_f32x4_add(vf5, vbias5);
vf6 = wasm_f32x4_add(vf6, vbias6);
vf7 = wasm_f32x4_add(vf7, vbias7);
v128_t vexpw0 = wasm_i32x4_shr(vf0, 13);
v128_t vexpw1 = wasm_i32x4_shr(vf1, 13);
v128_t vexpw2 = wasm_i32x4_shr(vf2, 13);
v128_t vexpw3 = wasm_i32x4_shr(vf3, 13);
v128_t vexpw4 = wasm_i32x4_shr(vf4, 13);
v128_t vexpw5 = wasm_i32x4_shr(vf5, 13);
v128_t vexpw6 = wasm_i32x4_shr(vf6, 13);
v128_t vexpw7 = wasm_i32x4_shr(vf7, 13);
const v128_t vmantw0 = wasm_v128_and(vf0, vmanth_mask);
const v128_t vmantw1 = wasm_v128_and(vf1, vmanth_mask);
const v128_t vmantw2 = wasm_v128_and(vf2, vmanth_mask);
const v128_t vmantw3 = wasm_v128_and(vf3, vmanth_mask);
const v128_t vmantw4 = wasm_v128_and(vf4, vmanth_mask);
const v128_t vmantw5 = wasm_v128_and(vf5, vmanth_mask);
const v128_t vmantw6 = wasm_v128_and(vf6, vmanth_mask);
const v128_t vmantw7 = wasm_v128_and(vf7, vmanth_mask);
vexpw0 = wasm_v128_and(vexpw0, vexph_mask);
vexpw1 = wasm_v128_and(vexpw1, vexph_mask);
vexpw2 = wasm_v128_and(vexpw2, vexph_mask);
vexpw3 = wasm_v128_and(vexpw3, vexph_mask);
vexpw4 = wasm_v128_and(vexpw4, vexph_mask);
vexpw5 = wasm_v128_and(vexpw5, vexph_mask);
vexpw6 = wasm_v128_and(vexpw6, vexph_mask);
vexpw7 = wasm_v128_and(vexpw7, vexph_mask);
const v128_t vnonsignw0 = wasm_i32x4_add(vmantw0, vexpw0);
const v128_t vnonsignw1 = wasm_i32x4_add(vmantw1, vexpw1);
const v128_t vnonsignw2 = wasm_i32x4_add(vmantw2, vexpw2);
const v128_t vnonsignw3 = wasm_i32x4_add(vmantw3, vexpw3);
const v128_t vnonsignw4 = wasm_i32x4_add(vmantw4, vexpw4);
const v128_t vnonsignw5 = wasm_i32x4_add(vmantw5, vexpw5);
const v128_t vnonsignw6 = wasm_i32x4_add(vmantw6, vexpw6);
const v128_t vnonsignw7 = wasm_i32x4_add(vmantw7, vexpw7);
const v128_t vnonsignh0 = wasm_i16x8_narrow_i32x4(vnonsignw0, vnonsignw1);
const v128_t vnonsignh1 = wasm_i16x8_narrow_i32x4(vnonsignw2, vnonsignw3);
const v128_t vnonsignh2 = wasm_i16x8_narrow_i32x4(vnonsignw4, vnonsignw5);
const v128_t vnonsignh3 = wasm_i16x8_narrow_i32x4(vnonsignw6, vnonsignw7);
const v128_t vabsh0 = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh0, vnanmaskh0);
const v128_t vabsh1 = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh1, vnanmaskh1);
const v128_t vabsh2 = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh2, vnanmaskh2);
const v128_t vabsh3 = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh3, vnanmaskh3);
const v128_t vh0 = wasm_v128_or(vabsh0, vsignh0);
const v128_t vh1 = wasm_v128_or(vabsh1, vsignh1);
const v128_t vh2 = wasm_v128_or(vabsh2, vsignh2);
const v128_t vh3 = wasm_v128_or(vabsh3, vsignh3);
wasm_v128_store(o, vh0);
wasm_v128_store(o + 8, vh1);
wasm_v128_store(o + 16, vh2);
wasm_v128_store(o + 24, vh3);
o += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx_lo = wasm_v128_load(input);
const v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh, vnanmaskh);
const v128_t vh = wasm_v128_or(vabsh, vsignh);
wasm_v128_store(o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const v128_t vx_lo = wasm_v128_load(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const v128_t vx_hi = wasm_v128_load(input_hi);
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh, vnanmaskh);
v128_t vh = wasm_v128_or(vabsh, vsignh);
if (batch & (4 * sizeof(float))) {
wasm_v128_store64_lane(o, vh, 0);
vh = wasm_v64x2_shuffle(vh, vh, 1, 1);
o += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store32_lane(o, vh, 0);
vh = wasm_i64x2_shr(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store16_lane(o, vh, 0);
}
}
}
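/*
 * Note on the calling convention visible in the asserts: batch is a byte count that must be a
 * non-zero multiple of sizeof(float), not an element count, and output is written as uint16_t
 * half-precision values. A trivial sketch of the conversion a caller would do (the helper
 * name is illustrative only):
 */
#include <stddef.h>

static inline size_t f32_f16_vcvt_batch_bytes_sketch(size_t n_elements) {
  return n_elements * sizeof(float);  /* e.g. 13 floats -> batch of 52 bytes */
}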
| 13,482 | 42.493548 | 97 | c |
| XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-wasmrelaxedsimd-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__wasmrelaxedsimd_x8(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vexp_bias = wasm_v128_load64_splat(params->wasmsimd.exp_bias);
const v128_t vscale_to_inf = wasm_v128_load64_splat(params->wasmsimd.scale_to_inf);
const v128_t vexpw_max = wasm_v128_load64_splat(params->wasmsimd.expw_max);
const v128_t vscale_to_zero = wasm_v128_load64_splat(params->wasmsimd.scale_to_zero);
const v128_t vbias_min = wasm_v128_load64_splat(params->wasmsimd.bias_min);
const v128_t vmanth_mask = wasm_v128_load64_splat(params->wasmsimd.manth_mask);
const v128_t vexph_mask = wasm_v128_load64_splat(params->wasmsimd.exph_mask);
const v128_t vnanh = wasm_v128_load64_splat(params->wasmsimd.nanh);
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx_lo = wasm_v128_load(input);
const v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh, vnanmaskh);
const v128_t vh = wasm_v128_or(vabsh, vsignh);
wasm_v128_store(o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const v128_t vx_lo = wasm_v128_load(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const v128_t vx_hi = wasm_v128_load(input_hi);
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = __builtin_wasm_relaxed_laneselect_i16x8(vnanh, vnonsignh, vnanmaskh);
v128_t vh = wasm_v128_or(vabsh, vsignh);
if (batch & (4 * sizeof(float))) {
wasm_v128_store64_lane(o, vh, 0);
vh = wasm_v64x2_shuffle(vh, vh, 1, 1);
o += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store32_lane(o, vh, 0);
vh = wasm_i64x2_shr(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store16_lane(o, vh, 0);
}
}
}
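/*
 * Reading the data flow above: multiplying |x| first by scale_to_inf and then by
 * scale_to_zero appears to saturate values too large for f16 to f32 infinity while keeping
 * finite values where the later bias add rounds the mantissa at the f16 position, and the
 * wasm_i16x8_max(vbias, vbias_min) clamp seems to play the same role as the bias clamp in the
 * common scalar routine sketched earlier in this dump, keeping the bias large enough for
 * inputs that land in the f16 subnormal range. The exact constants live in the params struct
 * and are not restated here.
 */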
| 5,916 | 38.18543 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-wasmsimd-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__wasmsimd_x16(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vexp_bias = wasm_v128_load64_splat(params->wasmsimd.exp_bias);
const v128_t vscale_to_inf = wasm_v128_load64_splat(params->wasmsimd.scale_to_inf);
const v128_t vexpw_max = wasm_v128_load64_splat(params->wasmsimd.expw_max);
const v128_t vscale_to_zero = wasm_v128_load64_splat(params->wasmsimd.scale_to_zero);
const v128_t vbias_min = wasm_v128_load64_splat(params->wasmsimd.bias_min);
const v128_t vmanth_mask = wasm_v128_load64_splat(params->wasmsimd.manth_mask);
const v128_t vexph_mask = wasm_v128_load64_splat(params->wasmsimd.exph_mask);
const v128_t vnanh = wasm_v128_load64_splat(params->wasmsimd.nanh);
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0 = wasm_v128_load(input);
const v128_t vx1 = wasm_v128_load(input + 4);
const v128_t vx2 = wasm_v128_load(input + 8);
const v128_t vx3 = wasm_v128_load(input + 12);
input += 16;
const v128_t vabsx0 = wasm_f32x4_abs(vx0);
const v128_t vabsx1 = wasm_f32x4_abs(vx1);
const v128_t vabsx2 = wasm_f32x4_abs(vx2);
const v128_t vabsx3 = wasm_f32x4_abs(vx3);
const v128_t vsignx0 = wasm_v128_xor(vx0, vabsx0);
const v128_t vsignx1 = wasm_v128_xor(vx1, vabsx1);
const v128_t vsignx2 = wasm_v128_xor(vx2, vabsx2);
const v128_t vsignx3 = wasm_v128_xor(vx3, vabsx3);
v128_t vbias0 = wasm_i32x4_add(vabsx0, vexp_bias);
v128_t vbias1 = wasm_i32x4_add(vabsx1, vexp_bias);
v128_t vbias2 = wasm_i32x4_add(vabsx2, vexp_bias);
v128_t vbias3 = wasm_i32x4_add(vabsx3, vexp_bias);
v128_t vf0 = wasm_f32x4_mul(vabsx0, vscale_to_inf);
v128_t vf1 = wasm_f32x4_mul(vabsx1, vscale_to_inf);
v128_t vf2 = wasm_f32x4_mul(vabsx2, vscale_to_inf);
v128_t vf3 = wasm_f32x4_mul(vabsx3, vscale_to_inf);
const v128_t vnanmaskw0 = wasm_i32x4_gt(vabsx0, vexpw_max);
const v128_t vnanmaskw1 = wasm_i32x4_gt(vabsx1, vexpw_max);
const v128_t vnanmaskw2 = wasm_i32x4_gt(vabsx2, vexpw_max);
const v128_t vnanmaskw3 = wasm_i32x4_gt(vabsx3, vexpw_max);
vbias0 = wasm_v128_and(vbias0, vexpw_max);
vbias1 = wasm_v128_and(vbias1, vexpw_max);
vbias2 = wasm_v128_and(vbias2, vexpw_max);
vbias3 = wasm_v128_and(vbias3, vexpw_max);
vf0 = wasm_f32x4_mul(vf0, vscale_to_zero);
vf1 = wasm_f32x4_mul(vf1, vscale_to_zero);
vf2 = wasm_f32x4_mul(vf2, vscale_to_zero);
vf3 = wasm_f32x4_mul(vf3, vscale_to_zero);
const v128_t vnanmaskh0 = wasm_i16x8_narrow_i32x4(vnanmaskw0, vnanmaskw1);
const v128_t vnanmaskh1 = wasm_i16x8_narrow_i32x4(vnanmaskw2, vnanmaskw3);
const v128_t vsignh0 = wasm_i16x8_narrow_i32x4(vsignx0, vsignx1);
const v128_t vsignh1 = wasm_i16x8_narrow_i32x4(vsignx2, vsignx3);
vbias0 = wasm_i16x8_max(vbias0, vbias_min);
vbias1 = wasm_i16x8_max(vbias1, vbias_min);
vbias2 = wasm_i16x8_max(vbias2, vbias_min);
vbias3 = wasm_i16x8_max(vbias3, vbias_min);
vf0 = wasm_f32x4_add(vf0, vbias0);
vf1 = wasm_f32x4_add(vf1, vbias1);
vf2 = wasm_f32x4_add(vf2, vbias2);
vf3 = wasm_f32x4_add(vf3, vbias3);
v128_t vexpw0 = wasm_i32x4_shr(vf0, 13);
v128_t vexpw1 = wasm_i32x4_shr(vf1, 13);
v128_t vexpw2 = wasm_i32x4_shr(vf2, 13);
v128_t vexpw3 = wasm_i32x4_shr(vf3, 13);
const v128_t vmantw0 = wasm_v128_and(vf0, vmanth_mask);
const v128_t vmantw1 = wasm_v128_and(vf1, vmanth_mask);
const v128_t vmantw2 = wasm_v128_and(vf2, vmanth_mask);
const v128_t vmantw3 = wasm_v128_and(vf3, vmanth_mask);
vexpw0 = wasm_v128_and(vexpw0, vexph_mask);
vexpw1 = wasm_v128_and(vexpw1, vexph_mask);
vexpw2 = wasm_v128_and(vexpw2, vexph_mask);
vexpw3 = wasm_v128_and(vexpw3, vexph_mask);
const v128_t vnonsignw0 = wasm_i32x4_add(vmantw0, vexpw0);
const v128_t vnonsignw1 = wasm_i32x4_add(vmantw1, vexpw1);
const v128_t vnonsignw2 = wasm_i32x4_add(vmantw2, vexpw2);
const v128_t vnonsignw3 = wasm_i32x4_add(vmantw3, vexpw3);
const v128_t vnonsignh0 = wasm_i16x8_narrow_i32x4(vnonsignw0, vnonsignw1);
const v128_t vnonsignh1 = wasm_i16x8_narrow_i32x4(vnonsignw2, vnonsignw3);
const v128_t vabsh0 = wasm_v128_bitselect(vnanh, vnonsignh0, vnanmaskh0);
const v128_t vabsh1 = wasm_v128_bitselect(vnanh, vnonsignh1, vnanmaskh1);
const v128_t vh0 = wasm_v128_or(vabsh0, vsignh0);
const v128_t vh1 = wasm_v128_or(vabsh1, vsignh1);
wasm_v128_store(o, vh0);
wasm_v128_store(o + 8, vh1);
o += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx_lo = wasm_v128_load(input);
const v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);
const v128_t vh = wasm_v128_or(vabsh, vsignh);
wasm_v128_store(o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const v128_t vx_lo = wasm_v128_load(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const v128_t vx_hi = wasm_v128_load(input_hi);
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);
v128_t vh = wasm_v128_or(vabsh, vsignh);
if (batch & (4 * sizeof(float))) {
wasm_v128_store64_lane(o, vh, 0);
vh = wasm_v64x2_shuffle(vh, vh, 1, 1);
o += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store32_lane(o, vh, 0);
vh = wasm_i64x2_shr(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store16_lane(o, vh, 0);
}
}
}
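/*
 * On the sign handling above: vsignx = x ^ |x| isolates the f32 sign bit, the signed i16
 * narrow saturates 0x80000000 to 0x8000 (the f16 sign position), and the final OR reattaches
 * it to the packed magnitude. One lane of that as a scalar sketch (hypothetical helper):
 */
#include <stdint.h>

static inline uint16_t f16_sign_from_f32_bits_sketch(uint32_t w) {
  return (uint16_t) ((w & UINT32_C(0x80000000)) >> 16);  /* 0x80000000 -> 0x8000, 0 -> 0 */
}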
| 9,667 | 38.950413 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-wasmsimd-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__wasmsimd_x24(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vexp_bias = wasm_v128_load64_splat(params->wasmsimd.exp_bias);
const v128_t vscale_to_inf = wasm_v128_load64_splat(params->wasmsimd.scale_to_inf);
const v128_t vexpw_max = wasm_v128_load64_splat(params->wasmsimd.expw_max);
const v128_t vscale_to_zero = wasm_v128_load64_splat(params->wasmsimd.scale_to_zero);
const v128_t vbias_min = wasm_v128_load64_splat(params->wasmsimd.bias_min);
const v128_t vmanth_mask = wasm_v128_load64_splat(params->wasmsimd.manth_mask);
const v128_t vexph_mask = wasm_v128_load64_splat(params->wasmsimd.exph_mask);
const v128_t vnanh = wasm_v128_load64_splat(params->wasmsimd.nanh);
uint16_t* o = (uint16_t*) output;
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const v128_t vx0 = wasm_v128_load(input);
const v128_t vx1 = wasm_v128_load(input + 4);
const v128_t vx2 = wasm_v128_load(input + 8);
const v128_t vx3 = wasm_v128_load(input + 12);
const v128_t vx4 = wasm_v128_load(input + 16);
const v128_t vx5 = wasm_v128_load(input + 20);
input += 24;
const v128_t vabsx0 = wasm_f32x4_abs(vx0);
const v128_t vabsx1 = wasm_f32x4_abs(vx1);
const v128_t vabsx2 = wasm_f32x4_abs(vx2);
const v128_t vabsx3 = wasm_f32x4_abs(vx3);
const v128_t vabsx4 = wasm_f32x4_abs(vx4);
const v128_t vabsx5 = wasm_f32x4_abs(vx5);
const v128_t vsignx0 = wasm_v128_xor(vx0, vabsx0);
const v128_t vsignx1 = wasm_v128_xor(vx1, vabsx1);
const v128_t vsignx2 = wasm_v128_xor(vx2, vabsx2);
const v128_t vsignx3 = wasm_v128_xor(vx3, vabsx3);
const v128_t vsignx4 = wasm_v128_xor(vx4, vabsx4);
const v128_t vsignx5 = wasm_v128_xor(vx5, vabsx5);
v128_t vbias0 = wasm_i32x4_add(vabsx0, vexp_bias);
v128_t vbias1 = wasm_i32x4_add(vabsx1, vexp_bias);
v128_t vbias2 = wasm_i32x4_add(vabsx2, vexp_bias);
v128_t vbias3 = wasm_i32x4_add(vabsx3, vexp_bias);
v128_t vbias4 = wasm_i32x4_add(vabsx4, vexp_bias);
v128_t vbias5 = wasm_i32x4_add(vabsx5, vexp_bias);
v128_t vf0 = wasm_f32x4_mul(vabsx0, vscale_to_inf);
v128_t vf1 = wasm_f32x4_mul(vabsx1, vscale_to_inf);
v128_t vf2 = wasm_f32x4_mul(vabsx2, vscale_to_inf);
v128_t vf3 = wasm_f32x4_mul(vabsx3, vscale_to_inf);
v128_t vf4 = wasm_f32x4_mul(vabsx4, vscale_to_inf);
v128_t vf5 = wasm_f32x4_mul(vabsx5, vscale_to_inf);
const v128_t vnanmaskw0 = wasm_i32x4_gt(vabsx0, vexpw_max);
const v128_t vnanmaskw1 = wasm_i32x4_gt(vabsx1, vexpw_max);
const v128_t vnanmaskw2 = wasm_i32x4_gt(vabsx2, vexpw_max);
const v128_t vnanmaskw3 = wasm_i32x4_gt(vabsx3, vexpw_max);
const v128_t vnanmaskw4 = wasm_i32x4_gt(vabsx4, vexpw_max);
const v128_t vnanmaskw5 = wasm_i32x4_gt(vabsx5, vexpw_max);
vbias0 = wasm_v128_and(vbias0, vexpw_max);
vbias1 = wasm_v128_and(vbias1, vexpw_max);
vbias2 = wasm_v128_and(vbias2, vexpw_max);
vbias3 = wasm_v128_and(vbias3, vexpw_max);
vbias4 = wasm_v128_and(vbias4, vexpw_max);
vbias5 = wasm_v128_and(vbias5, vexpw_max);
vf0 = wasm_f32x4_mul(vf0, vscale_to_zero);
vf1 = wasm_f32x4_mul(vf1, vscale_to_zero);
vf2 = wasm_f32x4_mul(vf2, vscale_to_zero);
vf3 = wasm_f32x4_mul(vf3, vscale_to_zero);
vf4 = wasm_f32x4_mul(vf4, vscale_to_zero);
vf5 = wasm_f32x4_mul(vf5, vscale_to_zero);
const v128_t vnanmaskh0 = wasm_i16x8_narrow_i32x4(vnanmaskw0, vnanmaskw1);
const v128_t vnanmaskh1 = wasm_i16x8_narrow_i32x4(vnanmaskw2, vnanmaskw3);
const v128_t vnanmaskh2 = wasm_i16x8_narrow_i32x4(vnanmaskw4, vnanmaskw5);
const v128_t vsignh0 = wasm_i16x8_narrow_i32x4(vsignx0, vsignx1);
const v128_t vsignh1 = wasm_i16x8_narrow_i32x4(vsignx2, vsignx3);
const v128_t vsignh2 = wasm_i16x8_narrow_i32x4(vsignx4, vsignx5);
vbias0 = wasm_i16x8_max(vbias0, vbias_min);
vbias1 = wasm_i16x8_max(vbias1, vbias_min);
vbias2 = wasm_i16x8_max(vbias2, vbias_min);
vbias3 = wasm_i16x8_max(vbias3, vbias_min);
vbias4 = wasm_i16x8_max(vbias4, vbias_min);
vbias5 = wasm_i16x8_max(vbias5, vbias_min);
vf0 = wasm_f32x4_add(vf0, vbias0);
vf1 = wasm_f32x4_add(vf1, vbias1);
vf2 = wasm_f32x4_add(vf2, vbias2);
vf3 = wasm_f32x4_add(vf3, vbias3);
vf4 = wasm_f32x4_add(vf4, vbias4);
vf5 = wasm_f32x4_add(vf5, vbias5);
v128_t vexpw0 = wasm_i32x4_shr(vf0, 13);
v128_t vexpw1 = wasm_i32x4_shr(vf1, 13);
v128_t vexpw2 = wasm_i32x4_shr(vf2, 13);
v128_t vexpw3 = wasm_i32x4_shr(vf3, 13);
v128_t vexpw4 = wasm_i32x4_shr(vf4, 13);
v128_t vexpw5 = wasm_i32x4_shr(vf5, 13);
const v128_t vmantw0 = wasm_v128_and(vf0, vmanth_mask);
const v128_t vmantw1 = wasm_v128_and(vf1, vmanth_mask);
const v128_t vmantw2 = wasm_v128_and(vf2, vmanth_mask);
const v128_t vmantw3 = wasm_v128_and(vf3, vmanth_mask);
const v128_t vmantw4 = wasm_v128_and(vf4, vmanth_mask);
const v128_t vmantw5 = wasm_v128_and(vf5, vmanth_mask);
vexpw0 = wasm_v128_and(vexpw0, vexph_mask);
vexpw1 = wasm_v128_and(vexpw1, vexph_mask);
vexpw2 = wasm_v128_and(vexpw2, vexph_mask);
vexpw3 = wasm_v128_and(vexpw3, vexph_mask);
vexpw4 = wasm_v128_and(vexpw4, vexph_mask);
vexpw5 = wasm_v128_and(vexpw5, vexph_mask);
const v128_t vnonsignw0 = wasm_i32x4_add(vmantw0, vexpw0);
const v128_t vnonsignw1 = wasm_i32x4_add(vmantw1, vexpw1);
const v128_t vnonsignw2 = wasm_i32x4_add(vmantw2, vexpw2);
const v128_t vnonsignw3 = wasm_i32x4_add(vmantw3, vexpw3);
const v128_t vnonsignw4 = wasm_i32x4_add(vmantw4, vexpw4);
const v128_t vnonsignw5 = wasm_i32x4_add(vmantw5, vexpw5);
const v128_t vnonsignh0 = wasm_i16x8_narrow_i32x4(vnonsignw0, vnonsignw1);
const v128_t vnonsignh1 = wasm_i16x8_narrow_i32x4(vnonsignw2, vnonsignw3);
const v128_t vnonsignh2 = wasm_i16x8_narrow_i32x4(vnonsignw4, vnonsignw5);
const v128_t vabsh0 = wasm_v128_bitselect(vnanh, vnonsignh0, vnanmaskh0);
const v128_t vabsh1 = wasm_v128_bitselect(vnanh, vnonsignh1, vnanmaskh1);
const v128_t vabsh2 = wasm_v128_bitselect(vnanh, vnonsignh2, vnanmaskh2);
const v128_t vh0 = wasm_v128_or(vabsh0, vsignh0);
const v128_t vh1 = wasm_v128_or(vabsh1, vsignh1);
const v128_t vh2 = wasm_v128_or(vabsh2, vsignh2);
wasm_v128_store(o, vh0);
wasm_v128_store(o + 8, vh1);
wasm_v128_store(o + 16, vh2);
o += 24;
}
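  // Tail loop: convert the remaining groups of 8 floats (two 4-float halves) with the
  // same rescale/bias sequence as the unrolled loop above.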
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx_lo = wasm_v128_load(input);
const v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);
const v128_t vh = wasm_v128_or(vabsh, vsignh);
wasm_v128_store(o, vh);
o += 8;
}
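  // Last 1-7 elements: the second load starts 4 floats further on only when at least
  // 4 elements remain, otherwise it re-reads the first vector; the partial stores at
  // the bottom write only the valid lanes.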
if XNN_UNPREDICTABLE(batch != 0) {
const v128_t vx_lo = wasm_v128_load(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const v128_t vx_hi = wasm_v128_load(input_hi);
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);
v128_t vh = wasm_v128_or(vabsh, vsignh);
if (batch & (4 * sizeof(float))) {
wasm_v128_store64_lane(o, vh, 0);
vh = wasm_v64x2_shuffle(vh, vh, 1, 1);
o += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store32_lane(o, vh, 0);
vh = wasm_i64x2_shr(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store16_lane(o, vh, 0);
}
}
}
| 11,511 | 40.710145 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-wasmsimd-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__wasmsimd_x32(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
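  // All conversion constants (exponent bias, rescale factors, field masks, and the
  // half-precision NaN pattern) are precomputed and splatted from params.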
const v128_t vexp_bias = wasm_v128_load64_splat(params->wasmsimd.exp_bias);
const v128_t vscale_to_inf = wasm_v128_load64_splat(params->wasmsimd.scale_to_inf);
const v128_t vexpw_max = wasm_v128_load64_splat(params->wasmsimd.expw_max);
const v128_t vscale_to_zero = wasm_v128_load64_splat(params->wasmsimd.scale_to_zero);
const v128_t vbias_min = wasm_v128_load64_splat(params->wasmsimd.bias_min);
const v128_t vmanth_mask = wasm_v128_load64_splat(params->wasmsimd.manth_mask);
const v128_t vexph_mask = wasm_v128_load64_splat(params->wasmsimd.exph_mask);
const v128_t vnanh = wasm_v128_load64_splat(params->wasmsimd.nanh);
uint16_t* o = (uint16_t*) output;
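  // Main loop: 32 floats per iteration. For each group of 4, the sign is split off,
  // the magnitude is rescaled and re-biased so the half-precision exponent and
  // mantissa can be read out of the float bits, the result is narrowed to 16 bits,
  // lanes holding NaN inputs are patched with the half-precision NaN pattern, and
  // the sign is OR-ed back in.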
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const v128_t vx0 = wasm_v128_load(input);
const v128_t vx1 = wasm_v128_load(input + 4);
const v128_t vx2 = wasm_v128_load(input + 8);
const v128_t vx3 = wasm_v128_load(input + 12);
const v128_t vx4 = wasm_v128_load(input + 16);
const v128_t vx5 = wasm_v128_load(input + 20);
const v128_t vx6 = wasm_v128_load(input + 24);
const v128_t vx7 = wasm_v128_load(input + 28);
input += 32;
const v128_t vabsx0 = wasm_f32x4_abs(vx0);
const v128_t vabsx1 = wasm_f32x4_abs(vx1);
const v128_t vabsx2 = wasm_f32x4_abs(vx2);
const v128_t vabsx3 = wasm_f32x4_abs(vx3);
const v128_t vabsx4 = wasm_f32x4_abs(vx4);
const v128_t vabsx5 = wasm_f32x4_abs(vx5);
const v128_t vabsx6 = wasm_f32x4_abs(vx6);
const v128_t vabsx7 = wasm_f32x4_abs(vx7);
const v128_t vsignx0 = wasm_v128_xor(vx0, vabsx0);
const v128_t vsignx1 = wasm_v128_xor(vx1, vabsx1);
const v128_t vsignx2 = wasm_v128_xor(vx2, vabsx2);
const v128_t vsignx3 = wasm_v128_xor(vx3, vabsx3);
const v128_t vsignx4 = wasm_v128_xor(vx4, vabsx4);
const v128_t vsignx5 = wasm_v128_xor(vx5, vabsx5);
const v128_t vsignx6 = wasm_v128_xor(vx6, vabsx6);
const v128_t vsignx7 = wasm_v128_xor(vx7, vabsx7);
v128_t vbias0 = wasm_i32x4_add(vabsx0, vexp_bias);
v128_t vbias1 = wasm_i32x4_add(vabsx1, vexp_bias);
v128_t vbias2 = wasm_i32x4_add(vabsx2, vexp_bias);
v128_t vbias3 = wasm_i32x4_add(vabsx3, vexp_bias);
v128_t vbias4 = wasm_i32x4_add(vabsx4, vexp_bias);
v128_t vbias5 = wasm_i32x4_add(vabsx5, vexp_bias);
v128_t vbias6 = wasm_i32x4_add(vabsx6, vexp_bias);
v128_t vbias7 = wasm_i32x4_add(vabsx7, vexp_bias);
v128_t vf0 = wasm_f32x4_mul(vabsx0, vscale_to_inf);
v128_t vf1 = wasm_f32x4_mul(vabsx1, vscale_to_inf);
v128_t vf2 = wasm_f32x4_mul(vabsx2, vscale_to_inf);
v128_t vf3 = wasm_f32x4_mul(vabsx3, vscale_to_inf);
v128_t vf4 = wasm_f32x4_mul(vabsx4, vscale_to_inf);
v128_t vf5 = wasm_f32x4_mul(vabsx5, vscale_to_inf);
v128_t vf6 = wasm_f32x4_mul(vabsx6, vscale_to_inf);
v128_t vf7 = wasm_f32x4_mul(vabsx7, vscale_to_inf);
const v128_t vnanmaskw0 = wasm_i32x4_gt(vabsx0, vexpw_max);
const v128_t vnanmaskw1 = wasm_i32x4_gt(vabsx1, vexpw_max);
const v128_t vnanmaskw2 = wasm_i32x4_gt(vabsx2, vexpw_max);
const v128_t vnanmaskw3 = wasm_i32x4_gt(vabsx3, vexpw_max);
const v128_t vnanmaskw4 = wasm_i32x4_gt(vabsx4, vexpw_max);
const v128_t vnanmaskw5 = wasm_i32x4_gt(vabsx5, vexpw_max);
const v128_t vnanmaskw6 = wasm_i32x4_gt(vabsx6, vexpw_max);
const v128_t vnanmaskw7 = wasm_i32x4_gt(vabsx7, vexpw_max);
vbias0 = wasm_v128_and(vbias0, vexpw_max);
vbias1 = wasm_v128_and(vbias1, vexpw_max);
vbias2 = wasm_v128_and(vbias2, vexpw_max);
vbias3 = wasm_v128_and(vbias3, vexpw_max);
vbias4 = wasm_v128_and(vbias4, vexpw_max);
vbias5 = wasm_v128_and(vbias5, vexpw_max);
vbias6 = wasm_v128_and(vbias6, vexpw_max);
vbias7 = wasm_v128_and(vbias7, vexpw_max);
vf0 = wasm_f32x4_mul(vf0, vscale_to_zero);
vf1 = wasm_f32x4_mul(vf1, vscale_to_zero);
vf2 = wasm_f32x4_mul(vf2, vscale_to_zero);
vf3 = wasm_f32x4_mul(vf3, vscale_to_zero);
vf4 = wasm_f32x4_mul(vf4, vscale_to_zero);
vf5 = wasm_f32x4_mul(vf5, vscale_to_zero);
vf6 = wasm_f32x4_mul(vf6, vscale_to_zero);
vf7 = wasm_f32x4_mul(vf7, vscale_to_zero);
const v128_t vnanmaskh0 = wasm_i16x8_narrow_i32x4(vnanmaskw0, vnanmaskw1);
const v128_t vnanmaskh1 = wasm_i16x8_narrow_i32x4(vnanmaskw2, vnanmaskw3);
const v128_t vnanmaskh2 = wasm_i16x8_narrow_i32x4(vnanmaskw4, vnanmaskw5);
const v128_t vnanmaskh3 = wasm_i16x8_narrow_i32x4(vnanmaskw6, vnanmaskw7);
const v128_t vsignh0 = wasm_i16x8_narrow_i32x4(vsignx0, vsignx1);
const v128_t vsignh1 = wasm_i16x8_narrow_i32x4(vsignx2, vsignx3);
const v128_t vsignh2 = wasm_i16x8_narrow_i32x4(vsignx4, vsignx5);
const v128_t vsignh3 = wasm_i16x8_narrow_i32x4(vsignx6, vsignx7);
vbias0 = wasm_i16x8_max(vbias0, vbias_min);
vbias1 = wasm_i16x8_max(vbias1, vbias_min);
vbias2 = wasm_i16x8_max(vbias2, vbias_min);
vbias3 = wasm_i16x8_max(vbias3, vbias_min);
vbias4 = wasm_i16x8_max(vbias4, vbias_min);
vbias5 = wasm_i16x8_max(vbias5, vbias_min);
vbias6 = wasm_i16x8_max(vbias6, vbias_min);
vbias7 = wasm_i16x8_max(vbias7, vbias_min);
vf0 = wasm_f32x4_add(vf0, vbias0);
vf1 = wasm_f32x4_add(vf1, vbias1);
vf2 = wasm_f32x4_add(vf2, vbias2);
vf3 = wasm_f32x4_add(vf3, vbias3);
vf4 = wasm_f32x4_add(vf4, vbias4);
vf5 = wasm_f32x4_add(vf5, vbias5);
vf6 = wasm_f32x4_add(vf6, vbias6);
vf7 = wasm_f32x4_add(vf7, vbias7);
v128_t vexpw0 = wasm_i32x4_shr(vf0, 13);
v128_t vexpw1 = wasm_i32x4_shr(vf1, 13);
v128_t vexpw2 = wasm_i32x4_shr(vf2, 13);
v128_t vexpw3 = wasm_i32x4_shr(vf3, 13);
v128_t vexpw4 = wasm_i32x4_shr(vf4, 13);
v128_t vexpw5 = wasm_i32x4_shr(vf5, 13);
v128_t vexpw6 = wasm_i32x4_shr(vf6, 13);
v128_t vexpw7 = wasm_i32x4_shr(vf7, 13);
const v128_t vmantw0 = wasm_v128_and(vf0, vmanth_mask);
const v128_t vmantw1 = wasm_v128_and(vf1, vmanth_mask);
const v128_t vmantw2 = wasm_v128_and(vf2, vmanth_mask);
const v128_t vmantw3 = wasm_v128_and(vf3, vmanth_mask);
const v128_t vmantw4 = wasm_v128_and(vf4, vmanth_mask);
const v128_t vmantw5 = wasm_v128_and(vf5, vmanth_mask);
const v128_t vmantw6 = wasm_v128_and(vf6, vmanth_mask);
const v128_t vmantw7 = wasm_v128_and(vf7, vmanth_mask);
vexpw0 = wasm_v128_and(vexpw0, vexph_mask);
vexpw1 = wasm_v128_and(vexpw1, vexph_mask);
vexpw2 = wasm_v128_and(vexpw2, vexph_mask);
vexpw3 = wasm_v128_and(vexpw3, vexph_mask);
vexpw4 = wasm_v128_and(vexpw4, vexph_mask);
vexpw5 = wasm_v128_and(vexpw5, vexph_mask);
vexpw6 = wasm_v128_and(vexpw6, vexph_mask);
vexpw7 = wasm_v128_and(vexpw7, vexph_mask);
const v128_t vnonsignw0 = wasm_i32x4_add(vmantw0, vexpw0);
const v128_t vnonsignw1 = wasm_i32x4_add(vmantw1, vexpw1);
const v128_t vnonsignw2 = wasm_i32x4_add(vmantw2, vexpw2);
const v128_t vnonsignw3 = wasm_i32x4_add(vmantw3, vexpw3);
const v128_t vnonsignw4 = wasm_i32x4_add(vmantw4, vexpw4);
const v128_t vnonsignw5 = wasm_i32x4_add(vmantw5, vexpw5);
const v128_t vnonsignw6 = wasm_i32x4_add(vmantw6, vexpw6);
const v128_t vnonsignw7 = wasm_i32x4_add(vmantw7, vexpw7);
const v128_t vnonsignh0 = wasm_i16x8_narrow_i32x4(vnonsignw0, vnonsignw1);
const v128_t vnonsignh1 = wasm_i16x8_narrow_i32x4(vnonsignw2, vnonsignw3);
const v128_t vnonsignh2 = wasm_i16x8_narrow_i32x4(vnonsignw4, vnonsignw5);
const v128_t vnonsignh3 = wasm_i16x8_narrow_i32x4(vnonsignw6, vnonsignw7);
const v128_t vabsh0 = wasm_v128_bitselect(vnanh, vnonsignh0, vnanmaskh0);
const v128_t vabsh1 = wasm_v128_bitselect(vnanh, vnonsignh1, vnanmaskh1);
const v128_t vabsh2 = wasm_v128_bitselect(vnanh, vnonsignh2, vnanmaskh2);
const v128_t vabsh3 = wasm_v128_bitselect(vnanh, vnonsignh3, vnanmaskh3);
const v128_t vh0 = wasm_v128_or(vabsh0, vsignh0);
const v128_t vh1 = wasm_v128_or(vabsh1, vsignh1);
const v128_t vh2 = wasm_v128_or(vabsh2, vsignh2);
const v128_t vh3 = wasm_v128_or(vabsh3, vsignh3);
wasm_v128_store(o, vh0);
wasm_v128_store(o + 8, vh1);
wasm_v128_store(o + 16, vh2);
wasm_v128_store(o + 24, vh3);
o += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx_lo = wasm_v128_load(input);
const v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);
const v128_t vh = wasm_v128_or(vabsh, vsignh);
wasm_v128_store(o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const v128_t vx_lo = wasm_v128_load(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const v128_t vx_hi = wasm_v128_load(input_hi);
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);
v128_t vh = wasm_v128_or(vabsh, vsignh);
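    // Store 4, 2, and/or 1 half-precision values depending on the remaining count,
    // shifting the written lanes out of the vector after each partial store.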
if (batch & (4 * sizeof(float))) {
wasm_v128_store64_lane(o, vh, 0);
vh = wasm_v64x2_shuffle(vh, vh, 1, 1);
o += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store32_lane(o, vh, 0);
vh = wasm_i64x2_shr(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store16_lane(o, vh, 0);
}
}
}
| 13,355 | 42.083871 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-f16-vcvt/gen/f32-f16-vcvt-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-f16-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f32_f16_vcvt_ukernel__wasmsimd_x8(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vexp_bias = wasm_v128_load64_splat(params->wasmsimd.exp_bias);
const v128_t vscale_to_inf = wasm_v128_load64_splat(params->wasmsimd.scale_to_inf);
const v128_t vexpw_max = wasm_v128_load64_splat(params->wasmsimd.expw_max);
const v128_t vscale_to_zero = wasm_v128_load64_splat(params->wasmsimd.scale_to_zero);
const v128_t vbias_min = wasm_v128_load64_splat(params->wasmsimd.bias_min);
const v128_t vmanth_mask = wasm_v128_load64_splat(params->wasmsimd.manth_mask);
const v128_t vexph_mask = wasm_v128_load64_splat(params->wasmsimd.exph_mask);
const v128_t vnanh = wasm_v128_load64_splat(params->wasmsimd.nanh);
uint16_t* o = (uint16_t*) output;
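  // Smallest variant: 8 floats per iteration with no extra unrolling; the remainder
  // path below reuses the same conversion on partially valid vectors.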
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx_lo = wasm_v128_load(input);
const v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);
const v128_t vh = wasm_v128_or(vabsh, vsignh);
wasm_v128_store(o, vh);
o += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const v128_t vx_lo = wasm_v128_load(input);
const float* input_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
const v128_t vx_hi = wasm_v128_load(input_hi);
const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);
v128_t vh = wasm_v128_or(vabsh, vsignh);
if (batch & (4 * sizeof(float))) {
wasm_v128_store64_lane(o, vh, 0);
vh = wasm_v64x2_shuffle(vh, vh, 1, 1);
o += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store32_lane(o, vh, 0);
vh = wasm_i64x2_shr(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store16_lane(o, vh, 0);
}
}
}
| 5,869 | 37.874172 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool-cw/f32-gavgpool-cw-neon-x4.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_f32_gavgpool_cw_ukernel__neon_x4(
size_t elements,
size_t channels,
const float* input,
float* output,
const union xnn_f32_gavgpool_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(elements != 0);
assert(elements % sizeof(float) == 0);
assert(channels != 0);
const uint32x4_t vmask = vld1q_u32(params->neon.mask);
const float32x2_t vmultiplier = vld1_dup_f32(¶ms->neon.multiplier);
const float32x2_t voutput_min = vld1_dup_f32(¶ms->neon.output_min);
const float32x2_t voutput_max = vld1_dup_f32(¶ms->neon.output_max);
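  // One output channel is reduced per iteration of the outer loop: all 'elements'
  // floats of the channel are summed, then scaled and clamped.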
do {
float32x4_t vsum0 = vmovq_n_f32(0.0f);
size_t n = elements;
if (n >= 16 * sizeof(float)) {
float32x4_t vsum1 = vmovq_n_f32(0.0f);
do {
const float32x4_t vi0 = vld1q_f32(input);
const float32x4_t vi1 = vld1q_f32(input + 4);
const float32x4_t vi2 = vld1q_f32(input + 8);
const float32x4_t vi3 = vld1q_f32(input + 12);
input += 16;
const float32x4_t acc0 = vaddq_f32(vi0, vi1);
const float32x4_t acc1 = vaddq_f32(vi2, vi3);
vsum0 = vaddq_f32(vsum0, acc0);
vsum1 = vaddq_f32(vsum1, acc1);
n -= 16 * sizeof(float);
} while (n >= 32 * sizeof(float));
vsum0 = vaddq_f32(vsum0, vsum1);
}
while (n >= 4 * sizeof(float)) {
const float32x4_t vi0 = vld1q_f32(input);
input += 4;
vsum0 = vaddq_f32(vsum0, vi0);
n -= 4 * sizeof(float);
}
if XNN_UNLIKELY(n != 0) {
float32x4_t vi0 = vld1q_f32(input); input = (const float*) ((uintptr_t) input + n);
vi0 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0)));
vsum0 = vaddq_f32(vsum0, vi0);
}
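    // Horizontal add of the four partial sums, then scale and clamp the single result.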
const float32x2_t vout2 = vpadd_f32(vget_low_f32(vsum0), vget_high_f32(vsum0));
const float32x2_t vout1 = vpadd_f32(vout2, vout2);
float32x2_t vout = vmul_f32(vout1, vmultiplier);
vout = vmax_f32(vout, voutput_min);
vout = vmin_f32(vout, voutput_max);
vst1_lane_f32(output, vout, 0); output += 1;
} while (--channels != 0);
}
| 2,347 | 30.306667 | 91 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool-cw/f32-gavgpool-cw-scalar-x1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_f32_gavgpool_cw_ukernel__scalar_x1(
size_t elements,
size_t channels,
const float* input,
float* output,
const union xnn_f32_gavgpool_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(elements != 0);
assert(elements % sizeof(float) == 0);
assert(channels != 0);
const float* i0 = input;
const float vmultiplier = params->scalar.multiplier;
const float voutput_max = params->scalar.output_max;
const float voutput_min = params->scalar.output_min;
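  // Each channel is summed with four scalar accumulators that are combined at the
  // end, so consecutive adds do not form one long dependency chain.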
while (channels != 0) {
float vsum0 = 0.f;
float vsum1 = 0.f;
float vsum2 = 0.f;
float vsum3 = 0.f;
size_t n = elements;
while (n >= 4 * sizeof(float)) {
vsum0 += i0[0];
vsum1 += i0[1];
vsum2 += i0[2];
vsum3 += i0[3];
i0 += 4;
n -= 4 * sizeof(float);
}
while (n != 0) {
vsum0 += *i0++;
n -= sizeof(float);
}
float vout = ( (vsum0 + vsum1) + (vsum2 + vsum3) ) * vmultiplier;
vout = math_min_f32(vout, voutput_max);
vout = math_max_f32(vout, voutput_min);
*output++ = vout;
channels -= 1;
}
}
| 1,344 | 21.79661 | 77 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool-cw/f32-gavgpool-cw-sse-x4.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_f32_gavgpool_cw_ukernel__sse_x4(
size_t elements,
size_t channels,
const float* input,
float* output,
const union xnn_f32_gavgpool_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(elements != 0);
assert(elements % sizeof(float) == 0);
assert(channels != 0);
const float* i0 = input;
const float* i1 = (const float*) ((uintptr_t) i0 + elements);
const float* i2 = (const float*) ((uintptr_t) i1 + elements);
const float* i3 = (const float*) ((uintptr_t) i2 + elements);
const __m128 vmask = _mm_load_ps((const float*) params->sse.mask);
const __m128 vmultiplier = _mm_load_ps(params->sse.multiplier);
const __m128 voutput_min = _mm_load_ps(params->sse.output_min);
const __m128 voutput_max = _mm_load_ps(params->sse.output_max);
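  // Main loop: 4 channels per iteration, one accumulator per channel; i0..i3 walk the
  // 4 channel rows in lockstep.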
while (channels >= 4) {
__m128 vsum0 = _mm_setzero_ps();
__m128 vsum1 = _mm_setzero_ps();
__m128 vsum2 = _mm_setzero_ps();
__m128 vsum3 = _mm_setzero_ps();
size_t n = elements;
while (n >= 4 * sizeof(float)) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
vsum0 = _mm_add_ps(vsum0, vi0);
vsum1 = _mm_add_ps(vsum1, vi1);
vsum2 = _mm_add_ps(vsum2, vi2);
vsum3 = _mm_add_ps(vsum3, vi3);
n -= 4 * sizeof(float);
}
if XNN_UNLIKELY(n != 0) {
const __m128 vi0 = _mm_and_ps(_mm_loadu_ps(i0), vmask);
i0 = (const float*) ((uintptr_t) i0 + n);
const __m128 vi1 = _mm_and_ps(_mm_loadu_ps(i1), vmask);
i1 = (const float*) ((uintptr_t) i1 + n);
const __m128 vi2 = _mm_and_ps(_mm_loadu_ps(i2), vmask);
i2 = (const float*) ((uintptr_t) i2 + n);
const __m128 vi3 = _mm_and_ps(_mm_loadu_ps(i3), vmask);
i3 = (const float*) ((uintptr_t) i3 + n);
vsum0 = _mm_add_ps(vsum0, vi0);
vsum1 = _mm_add_ps(vsum1, vi1);
vsum2 = _mm_add_ps(vsum2, vi2);
vsum3 = _mm_add_ps(vsum3, vi3);
}
    // Having exactly 4 rows (one accumulator per channel) makes this work out nicely:
    // the transpose-and-add reduction below leaves the 4 per-channel totals in the 4
    // lanes of a single vector.
const __m128 vsum01 = _mm_add_ps(_mm_unpacklo_ps(vsum0, vsum1), _mm_unpackhi_ps(vsum0, vsum1));
const __m128 vsum23 = _mm_add_ps(_mm_unpacklo_ps(vsum2, vsum3), _mm_unpackhi_ps(vsum2, vsum3));
const __m128 vsum = _mm_add_ps(_mm_movelh_ps(vsum01, vsum23), _mm_movehl_ps(vsum23, vsum01));
__m128 vout = _mm_mul_ps(vsum, vmultiplier);
vout = _mm_max_ps(vout, voutput_min);
vout = _mm_min_ps(vout, voutput_max);
_mm_storeu_ps(output, vout);
output += 4;
i0 = i3;
i1 = (const float*) ((uintptr_t) i0 + elements);
i2 = (const float*) ((uintptr_t) i1 + elements);
i3 = (const float*) ((uintptr_t) i2 + elements);
channels -= 4;
}
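  // Leftover 1-3 channels are handled one at a time: a single vector accumulator is
  // reduced horizontally and stored with _mm_store_ss.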
while (channels != 0) {
__m128 vsum = _mm_setzero_ps();
size_t n = elements;
while (n >= 4 * sizeof(float)) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
vsum = _mm_add_ps(vsum, vi0);
n -= 4 * sizeof(float);
}
if XNN_UNLIKELY(n != 0) {
__m128 vi0 = _mm_and_ps(_mm_loadu_ps(i0), vmask);
i0 = (const float*) ((uintptr_t) i0 + n);
vsum = _mm_add_ps(vsum, vi0);
}
vsum = _mm_add_ps(vsum, _mm_movehl_ps(vsum, vsum));
vsum = _mm_add_ss(vsum, _mm_shuffle_ps(vsum, vsum, _MM_SHUFFLE(3, 2, 1, 1)));
__m128 vout = _mm_mul_ss(vsum, vmultiplier);
vout = _mm_max_ss(vout, voutput_min);
vout = _mm_min_ss(vout, voutput_max);
_mm_store_ss(output, vout);
output += 1;
channels -= 1;
}
}
| 3,951 | 31.393443 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool/f32-gavgpool-7p7x-minmax-neon-c4.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_f32_gavgpool_minmax_ukernel_7p7x__neon_c4(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* buffer,
float* output,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const float* i0 = input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
const size_t packed_channels = round_up_po2(channels, 4);
const size_t input_increment = 7 * input_stride - packed_channels * sizeof(float);
float* b = buffer;
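  // First pass: sum the first 7 input rows into the channel-sized buffer.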
for (size_t c = 0; c < channels; c += 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum016 = vaddq_f32(vsum01, vi6);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum = vaddq_f32(vsum016, vsum2345);
vst1q_f32(b, vsum); b += 4;
}
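  // Middle passes: add 7 more rows into the buffer until at most 7 rows remain.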
for (rows -= 7; rows > 7; rows -= 7) {
b = buffer;
i0 = (const float*) ((uintptr_t) i0 + input_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
i4 = (const float*) ((uintptr_t) i4 + input_increment);
i5 = (const float*) ((uintptr_t) i5 + input_increment);
i6 = (const float*) ((uintptr_t) i6 + input_increment);
for (size_t c = 0; c < channels; c += 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vacc = vld1q_f32(b);
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum6a = vaddq_f32(vi6, vacc);
const float32x4_t vsum0123 = vaddq_f32(vsum01, vsum23);
const float32x4_t vsum456a = vaddq_f32(vsum45, vsum6a);
const float32x4_t vsum = vaddq_f32(vsum0123, vsum456a);
vst1q_f32(b, vsum); b += 4;
}
}
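  // Final pass: 1-7 rows remain; pointers for rows that are not present are redirected
  // to the zero buffer so the same 7-way sum can be reused before scaling and clamping.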
i0 = (const float*) ((uintptr_t) i0 + input_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
if (rows < 2) {
i1 = zero;
}
i2 = (const float*) ((uintptr_t) i2 + input_increment);
if (rows <= 2) {
i2 = zero;
}
i3 = (const float*) ((uintptr_t) i3 + input_increment);
if (rows < 4) {
i3 = zero;
}
i4 = (const float*) ((uintptr_t) i4 + input_increment);
if (rows <= 4) {
i4 = zero;
}
i5 = (const float*) ((uintptr_t) i5 + input_increment);
if (rows < 6) {
i5 = zero;
}
i6 = (const float*) ((uintptr_t) i6 + input_increment);
if (rows <= 6) {
i6 = zero;
}
const float32x4_t vscale = vld1q_dup_f32(¶ms->scalar.scale);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
b = buffer;
while (channels >= 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vacc = vld1q_f32(b); b += 4;
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum6a = vaddq_f32(vi6, vacc);
const float32x4_t vsum0123 = vaddq_f32(vsum01, vsum23);
const float32x4_t vsum456a = vaddq_f32(vsum45, vsum6a);
const float32x4_t vsum = vaddq_f32(vsum0123, vsum456a);
float32x4_t vout = vmulq_f32(vsum, vscale);
vout = vmaxq_f32(vout, vmin);
vout = vminq_f32(vout, vmax);
vst1q_f32(output, vout); output += 4;
channels -= 4;
}
if (channels != 0) {
const float32x4_t vi0 = vld1q_f32(i0);
const float32x4_t vi1 = vld1q_f32(i1);
const float32x4_t vi2 = vld1q_f32(i2);
const float32x4_t vi3 = vld1q_f32(i3);
const float32x4_t vi4 = vld1q_f32(i4);
const float32x4_t vi5 = vld1q_f32(i5);
const float32x4_t vi6 = vld1q_f32(i6);
const float32x4_t vacc = vld1q_f32(b);
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum6a = vaddq_f32(vi6, vacc);
const float32x4_t vsum0123 = vaddq_f32(vsum01, vsum23);
const float32x4_t vsum456a = vaddq_f32(vsum45, vsum6a);
const float32x4_t vsum = vaddq_f32(vsum0123, vsum456a);
float32x4_t vout = vmulq_f32(vsum, vscale);
vout = vmaxq_f32(vout, vmin);
vout = vminq_f32(vout, vmax);
float32x2_t vout_lo = vget_low_f32(vout);
if (channels & 2) {
vst1_f32(output, vout_lo); output += 2;
vout_lo = vget_high_f32(vout);
}
if (channels & 1) {
vst1_lane_f32(output, vout_lo, 0);
}
}
}
| 6,362 | 33.394595 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool/f32-gavgpool-7p7x-minmax-scalar-c1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_f32_gavgpool_minmax_ukernel_7p7x__scalar_c1(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* buffer,
float* output,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows > 7);
assert(channels != 0);
const float* i0 = input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - channels * sizeof(float);
float* b = buffer;
size_t c = channels;
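  // Scalar form of the same three-pass scheme: the first pass below seeds the buffer
  // with the sum of rows 0-6, one channel per iteration.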
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum016 = vsum01 + vi6;
const float vsum2345 = vsum23 + vsum45;
const float vsum = vsum016 + vsum2345;
*b++ = vsum;
} while (--c != 0);
for (rows -= 7; rows > 7; rows -= 7) {
b = buffer;
i0 = (const float*) ((uintptr_t) i0 + input_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
i4 = (const float*) ((uintptr_t) i4 + input_increment);
i5 = (const float*) ((uintptr_t) i5 + input_increment);
i6 = (const float*) ((uintptr_t) i6 + input_increment);
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vacc = *b;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum6a = vi6 + vacc;
const float vsum0123 = vsum01 + vsum23;
const float vsum456a = vsum45 + vsum6a;
const float vsum = vsum0123 + vsum456a;
*b++ = vsum;
} while (--c != 0);
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
if (rows < 2) {
i1 = zero;
}
i2 = (const float*) ((uintptr_t) i2 + input_increment);
if (rows <= 2) {
i2 = zero;
}
i3 = (const float*) ((uintptr_t) i3 + input_increment);
if (rows < 4) {
i3 = zero;
}
i4 = (const float*) ((uintptr_t) i4 + input_increment);
if (rows <= 4) {
i4 = zero;
}
i5 = (const float*) ((uintptr_t) i5 + input_increment);
if (rows < 6) {
i5 = zero;
}
i6 = (const float*) ((uintptr_t) i6 + input_increment);
if (rows <= 6) {
i6 = zero;
}
const float vscale = params->scalar.scale;
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
b = buffer;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vacc = *b++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum6a = vi6 + vacc;
const float vsum0123 = vsum01 + vsum23;
const float vsum456a = vsum45 + vsum6a;
const float vsum = vsum0123 + vsum456a;
float vout = vsum * vscale;
vout = math_max_f32(vout, vmin);
vout = math_min_f32(vout, vmax);
*output++ = vout;
} while (--channels != 0);
}
| 4,176 | 27.033557 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool/f32-gavgpool-7p7x-minmax-sse-c4.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_f32_gavgpool_minmax_ukernel_7p7x__sse_c4(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* buffer,
float* output,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const float* i0 = input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
const size_t packed_channels = round_up_po2(channels, 4);
const size_t input_increment = 7 * input_stride - packed_channels * sizeof(float);
float* b = buffer;
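  // First pass: sum rows 0-6 into the buffer, 4 channels at a time; the buffer is
  // sized for the channel count rounded up to a multiple of 4.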
for (size_t c = 0; c < channels; c += 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum016 = _mm_add_ps(vsum01, vi6);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum = _mm_add_ps(vsum016, vsum2345);
_mm_store_ps(b, vsum); b += 4;
}
for (rows -= 7; rows > 7; rows -= 7) {
b = buffer;
i0 = (const float*) ((uintptr_t) i0 + input_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
i4 = (const float*) ((uintptr_t) i4 + input_increment);
i5 = (const float*) ((uintptr_t) i5 + input_increment);
i6 = (const float*) ((uintptr_t) i6 + input_increment);
for (size_t c = 0; c < channels; c += 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vacc = _mm_load_ps(b);
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum6a = _mm_add_ps(vi6, vacc);
const __m128 vsum0123 = _mm_add_ps(vsum01, vsum23);
const __m128 vsum456a = _mm_add_ps(vsum45, vsum6a);
const __m128 vsum = _mm_add_ps(vsum0123, vsum456a);
_mm_store_ps(b, vsum); b += 4;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
if (rows < 2) {
i1 = zero;
}
i2 = (const float*) ((uintptr_t) i2 + input_increment);
if (rows <= 2) {
i2 = zero;
}
i3 = (const float*) ((uintptr_t) i3 + input_increment);
if (rows < 4) {
i3 = zero;
}
i4 = (const float*) ((uintptr_t) i4 + input_increment);
if (rows <= 4) {
i4 = zero;
}
i5 = (const float*) ((uintptr_t) i5 + input_increment);
if (rows < 6) {
i5 = zero;
}
i6 = (const float*) ((uintptr_t) i6 + input_increment);
if (rows <= 6) {
i6 = zero;
}
const __m128 vscale = _mm_load_ps(params->sse.scale);
const __m128 vmin = _mm_load_ps(params->sse.min);
const __m128 vmax = _mm_load_ps(params->sse.max);
b = buffer;
while (channels >= 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vacc = _mm_load_ps(b);
b += 4;
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum6a = _mm_add_ps(vi6, vacc);
const __m128 vsum0123 = _mm_add_ps(vsum01, vsum23);
const __m128 vsum456a = _mm_add_ps(vsum45, vsum6a);
const __m128 vsum = _mm_add_ps(vsum0123, vsum456a);
__m128 vout = _mm_mul_ps(vsum, vscale);
vout = _mm_max_ps(vout, vmin);
vout = _mm_min_ps(vout, vmax);
_mm_storeu_ps(output, vout);
output += 4;
channels -= 4;
}
if (channels != 0) {
const __m128 vi0 = _mm_loadu_ps(i0);
const __m128 vi1 = _mm_loadu_ps(i1);
const __m128 vi2 = _mm_loadu_ps(i2);
const __m128 vi3 = _mm_loadu_ps(i3);
const __m128 vi4 = _mm_loadu_ps(i4);
const __m128 vi5 = _mm_loadu_ps(i5);
const __m128 vi6 = _mm_loadu_ps(i6);
const __m128 vacc = _mm_loadu_ps(b);
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum6a = _mm_add_ps(vi6, vacc);
const __m128 vsum0123 = _mm_add_ps(vsum01, vsum23);
const __m128 vsum456a = _mm_add_ps(vsum45, vsum6a);
const __m128 vsum = _mm_add_ps(vsum0123, vsum456a);
__m128 vout = _mm_mul_ps(vsum, vscale);
vout = _mm_max_ps(vout, vmin);
vout = _mm_min_ps(vout, vmax);
if (channels & 2) {
_mm_storel_pi((__m64*) output, vout);
vout = _mm_movehl_ps(vout, vout);
output += 2;
}
if (channels & 1) {
_mm_store_ss(output, vout);
}
}
}
| 6,236 | 28.985577 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool/f32-gavgpool-7p7x-minmax-wasm-c1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_f32_gavgpool_minmax_ukernel_7p7x__wasm_c1(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* buffer,
float* output,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows > 7);
assert(channels != 0);
const float* i0 = input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - channels * sizeof(float);
float* b = buffer;
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum016 = vsum01 + vi6;
const float vsum2345 = vsum23 + vsum45;
const float vsum = vsum016 + vsum2345;
*b++ = vsum;
} while (--c != 0);
for (rows -= 7; rows > 7; rows -= 7) {
b = buffer;
i0 = (const float*) ((uintptr_t) i0 + input_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
i4 = (const float*) ((uintptr_t) i4 + input_increment);
i5 = (const float*) ((uintptr_t) i5 + input_increment);
i6 = (const float*) ((uintptr_t) i6 + input_increment);
size_t c = channels;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vacc = *b;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum6a = vi6 + vacc;
const float vsum0123 = vsum01 + vsum23;
const float vsum456a = vsum45 + vsum6a;
const float vsum = vsum0123 + vsum456a;
*b++ = vsum;
} while (--c != 0);
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
if (rows < 2) {
i1 = zero;
}
i2 = (const float*) ((uintptr_t) i2 + input_increment);
if (rows <= 2) {
i2 = zero;
}
i3 = (const float*) ((uintptr_t) i3 + input_increment);
if (rows < 4) {
i3 = zero;
}
i4 = (const float*) ((uintptr_t) i4 + input_increment);
if (rows <= 4) {
i4 = zero;
}
i5 = (const float*) ((uintptr_t) i5 + input_increment);
if (rows < 6) {
i5 = zero;
}
i6 = (const float*) ((uintptr_t) i6 + input_increment);
if (rows <= 6) {
i6 = zero;
}
const float vscale = params->scalar.scale;
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
b = buffer;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vacc = *b++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum6a = vi6 + vacc;
const float vsum0123 = vsum01 + vsum23;
const float vsum456a = vsum45 + vsum6a;
const float vsum = vsum0123 + vsum456a;
float vout = vsum * vscale;
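    // Clamp with the WebAssembly min/max builtins rather than the math_min/max helpers
    // used in the scalar kernel.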
vout = __builtin_wasm_max_f32(vout, vmin);
vout = __builtin_wasm_min_f32(vout, vmax);
*output++ = vout;
} while (--channels != 0);
}
| 4,194 | 27.154362 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool/f32-gavgpool-7p7x-minmax-wasmsimd-arm-c4.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_arm_c4(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* buffer,
float* output,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const float* i0 = input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
const size_t packed_channels = round_up_po2(channels, 4);
const size_t input_increment = 7 * input_stride - packed_channels * sizeof(float);
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum016 = wasm_f32x4_add(vsum01, vi6);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum = wasm_f32x4_add(vsum016, vsum2345);
wasm_v128_store(b, vsum);
b += 4;
}
for (rows -= 7; rows > 7; rows -= 7) {
b = buffer;
i0 = (const float*) ((uintptr_t) i0 + input_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
i4 = (const float*) ((uintptr_t) i4 + input_increment);
i5 = (const float*) ((uintptr_t) i5 + input_increment);
i6 = (const float*) ((uintptr_t) i6 + input_increment);
for (size_t c = 0; c < channels; c += 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vacc = wasm_v128_load(b);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum6a = wasm_f32x4_add(vi6, vacc);
const v128_t vsum0123 = wasm_f32x4_add(vsum01, vsum23);
const v128_t vsum456a = wasm_f32x4_add(vsum45, vsum6a);
const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);
wasm_v128_store(b, vsum); b += 4;
}
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
if (rows < 2) {
i1 = zero;
}
i2 = (const float*) ((uintptr_t) i2 + input_increment);
if (rows <= 2) {
i2 = zero;
}
i3 = (const float*) ((uintptr_t) i3 + input_increment);
if (rows < 4) {
i3 = zero;
}
i4 = (const float*) ((uintptr_t) i4 + input_increment);
if (rows <= 4) {
i4 = zero;
}
i5 = (const float*) ((uintptr_t) i5 + input_increment);
if (rows < 6) {
i5 = zero;
}
i6 = (const float*) ((uintptr_t) i6 + input_increment);
if (rows <= 6) {
i6 = zero;
}
const v128_t vscale = wasm_v128_load32_splat(¶ms->scalar.scale);
const v128_t vmin = wasm_v128_load32_splat(¶ms->scalar.min);
const v128_t vmax = wasm_v128_load32_splat(¶ms->scalar.max);
b = buffer;
while (channels >= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vacc = wasm_v128_load(b);
b += 4;
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum6a = wasm_f32x4_add(vi6, vacc);
const v128_t vsum0123 = wasm_f32x4_add(vsum01, vsum23);
const v128_t vsum456a = wasm_f32x4_add(vsum45, vsum6a);
const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
vout = wasm_f32x4_max(vout, vmin);
vout = wasm_f32x4_min(vout, vmax);
wasm_v128_store(output, vout);
output += 4;
channels -= 4;
}
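  // Leftover 1-3 channels: a full vector of results is computed and only 2 and/or 1
  // lanes are stored.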
if (channels != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vacc = wasm_v128_load(b);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum6a = wasm_f32x4_add(vi6, vacc);
const v128_t vsum0123 = wasm_f32x4_add(vsum01, vsum23);
const v128_t vsum456a = wasm_f32x4_add(vsum45, vsum6a);
const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
vout = wasm_f32x4_max(vout, vmin);
vout = wasm_f32x4_min(vout, vmax);
if (channels & 2) {
wasm_v128_store64_lane(output, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
output += 2;
}
if (channels & 1) {
wasm_v128_store32_lane(output, vout, 0);
output += 1;
}
}
}
| 6,547 | 30.180952 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool/f32-gavgpool-7p7x-minmax-wasmsimd-x86-c4.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_x86_c4(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* buffer,
float* output,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const float* i0 = input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
const size_t packed_channels = round_up_po2(channels, 4);
const size_t input_increment = 7 * input_stride - packed_channels * sizeof(float);
float* b = buffer;
for (size_t c = 0; c < channels; c += 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum016 = wasm_f32x4_add(vsum01, vi6);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum = wasm_f32x4_add(vsum016, vsum2345);
wasm_v128_store(b, vsum);
b += 4;
}
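  // Middle passes: add 7 more rows at a time to the per-channel sums held in the buffer.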
for (rows -= 7; rows > 7; rows -= 7) {
b = buffer;
i0 = (const float*) ((uintptr_t) i0 + input_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
i2 = (const float*) ((uintptr_t) i2 + input_increment);
i3 = (const float*) ((uintptr_t) i3 + input_increment);
i4 = (const float*) ((uintptr_t) i4 + input_increment);
i5 = (const float*) ((uintptr_t) i5 + input_increment);
i6 = (const float*) ((uintptr_t) i6 + input_increment);
for (size_t c = 0; c < channels; c += 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vacc = wasm_v128_load(b);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum6a = wasm_f32x4_add(vi6, vacc);
const v128_t vsum0123 = wasm_f32x4_add(vsum01, vsum23);
const v128_t vsum456a = wasm_f32x4_add(vsum45, vsum6a);
const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);
wasm_v128_store(b, vsum); b += 4;
}
}
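  // Final pass: 1-7 rows remain; row pointers beyond the remaining count read from the zero buffer.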
i0 = (const float*) ((uintptr_t) i0 + input_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
if (rows < 2) {
i1 = zero;
}
i2 = (const float*) ((uintptr_t) i2 + input_increment);
if (rows <= 2) {
i2 = zero;
}
i3 = (const float*) ((uintptr_t) i3 + input_increment);
if (rows < 4) {
i3 = zero;
}
i4 = (const float*) ((uintptr_t) i4 + input_increment);
if (rows <= 4) {
i4 = zero;
}
i5 = (const float*) ((uintptr_t) i5 + input_increment);
if (rows < 6) {
i5 = zero;
}
i6 = (const float*) ((uintptr_t) i6 + input_increment);
if (rows <= 6) {
i6 = zero;
}
  const v128_t vscale = wasm_v128_load32_splat(&params->scalar.scale);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max);
b = buffer;
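  // Add the remaining rows to the buffered sums, apply the caller-supplied scale, clamp, and store.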
while (channels >= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vacc = wasm_v128_load(b);
b += 4;
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum6a = wasm_f32x4_add(vi6, vacc);
const v128_t vsum0123 = wasm_f32x4_add(vsum01, vsum23);
const v128_t vsum456a = wasm_f32x4_add(vsum45, vsum6a);
const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
vout = wasm_f32x4_pmax(vmin, vout);
vout = wasm_f32x4_pmin(vmax, vout);
wasm_v128_store(output, vout);
output += 4;
channels -= 4;
}
if (channels != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vacc = wasm_v128_load(b);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum6a = wasm_f32x4_add(vi6, vacc);
const v128_t vsum0123 = wasm_f32x4_add(vsum01, vsum23);
const v128_t vsum456a = wasm_f32x4_add(vsum45, vsum6a);
const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
vout = wasm_f32x4_pmax(vmin, vout);
vout = wasm_f32x4_pmin(vmax, vout);
if (channels & 2) {
wasm_v128_store64_lane(output, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
output += 2;
}
if (channels & 1) {
wasm_v128_store32_lane(output, vout, 0);
output += 1;
}
}
}
| 6,551 | 30.2 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool/f32-gavgpool-7x-minmax-neon-c4.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_f32_gavgpool_minmax_ukernel_7x__neon_c4(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* output,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
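  // Set up 7 row pointers; rows beyond the actual row count read from the shared zero buffer.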
const float* i0 = input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
if (rows < 2) {
i1 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
if (rows <= 2) {
i2 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
if (rows < 4) {
i3 = zero;
}
const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
if (rows <= 4) {
i4 = zero;
}
const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
if (rows < 6) {
i5 = zero;
}
const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
if (rows <= 6) {
i6 = zero;
}
  const float32x4_t vscale = vld1q_dup_f32(&params->scalar.scale);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
while (channels >= 4) {
const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum016 = vaddq_f32(vsum01, vi6);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum = vaddq_f32(vsum016, vsum2345);
float32x4_t vout = vmulq_f32(vsum, vscale);
vout = vmaxq_f32(vout, vmin);
vout = vminq_f32(vout, vmax);
vst1q_f32(output, vout); output += 4;
channels -= 4;
}
if (channels != 0) {
const float32x4_t vi0 = vld1q_f32(i0);
const float32x4_t vi1 = vld1q_f32(i1);
const float32x4_t vi2 = vld1q_f32(i2);
const float32x4_t vi3 = vld1q_f32(i3);
const float32x4_t vi4 = vld1q_f32(i4);
const float32x4_t vi5 = vld1q_f32(i5);
const float32x4_t vi6 = vld1q_f32(i6);
const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
const float32x4_t vsum23 = vaddq_f32(vi2, vi3);
const float32x4_t vsum45 = vaddq_f32(vi4, vi5);
const float32x4_t vsum016 = vaddq_f32(vsum01, vi6);
const float32x4_t vsum2345 = vaddq_f32(vsum23, vsum45);
const float32x4_t vsum = vaddq_f32(vsum016, vsum2345);
float32x4_t vout = vmulq_f32(vsum, vscale);
vout = vmaxq_f32(vout, vmin);
vout = vminq_f32(vout, vmax);
float32x2_t vout_lo = vget_low_f32(vout);
if (channels & 2) {
vst1_f32(output, vout_lo); output += 2;
vout_lo = vget_high_f32(vout);
}
if (channels & 1) {
vst1_lane_f32(output, vout_lo, 0);
}
}
}
| 3,359 | 28.734513 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool/f32-gavgpool-7x-minmax-scalar-c1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_f32_gavgpool_minmax_ukernel_7x__scalar_c1(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* output,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const float* i0 = input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
if (rows < 2) {
i1 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
if (rows <= 2) {
i2 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
if (rows < 4) {
i3 = zero;
}
const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
if (rows <= 4) {
i4 = zero;
}
const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
if (rows < 6) {
i5 = zero;
}
const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
if (rows <= 6) {
i6 = zero;
}
const float vscale = params->scalar.scale;
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
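  // One channel per iteration: sum the 7 row values, scale, and clamp.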
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum016 = vsum01 + vi6;
const float vsum2345 = vsum23 + vsum45;
const float vsum = vsum016 + vsum2345;
float vout = vsum * vscale;
vout = math_max_f32(vout, vmin);
vout = math_min_f32(vout, vmax);
*output++ = vout;
} while (--channels != 0);
}
| 1,973 | 23.987342 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool/f32-gavgpool-7x-minmax-sse-c4.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gavgpool.h>
void xnn_f32_gavgpool_minmax_ukernel_7x__sse_c4(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* output,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const float* i0 = input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
if (rows < 2) {
i1 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
if (rows <= 2) {
i2 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
if (rows < 4) {
i3 = zero;
}
const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
if (rows <= 4) {
i4 = zero;
}
const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
if (rows < 6) {
i5 = zero;
}
const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
if (rows <= 6) {
i6 = zero;
}
const __m128 vscale = _mm_load_ps(params->sse.scale);
const __m128 vmin = _mm_load_ps(params->sse.min);
const __m128 vmax = _mm_load_ps(params->sse.max);
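  // Main loop: average 7 rows for 4 channels at a time.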
while (channels >= 4) {
const __m128 vi0 = _mm_loadu_ps(i0);
i0 += 4;
const __m128 vi1 = _mm_loadu_ps(i1);
i1 += 4;
const __m128 vi2 = _mm_loadu_ps(i2);
i2 += 4;
const __m128 vi3 = _mm_loadu_ps(i3);
i3 += 4;
const __m128 vi4 = _mm_loadu_ps(i4);
i4 += 4;
const __m128 vi5 = _mm_loadu_ps(i5);
i5 += 4;
const __m128 vi6 = _mm_loadu_ps(i6);
i6 += 4;
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum016 = _mm_add_ps(vsum01, vi6);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum = _mm_add_ps(vsum016, vsum2345);
__m128 vout = _mm_mul_ps(vsum, vscale);
vout = _mm_max_ps(vout, vmin);
vout = _mm_min_ps(vout, vmax);
_mm_storeu_ps(output, vout);
output += 4;
channels -= 4;
}
if (channels != 0) {
const __m128 vi0 = _mm_loadu_ps(i0);
const __m128 vi1 = _mm_loadu_ps(i1);
const __m128 vi2 = _mm_loadu_ps(i2);
const __m128 vi3 = _mm_loadu_ps(i3);
const __m128 vi4 = _mm_loadu_ps(i4);
const __m128 vi5 = _mm_loadu_ps(i5);
const __m128 vi6 = _mm_loadu_ps(i6);
const __m128 vsum01 = _mm_add_ps(vi0, vi1);
const __m128 vsum23 = _mm_add_ps(vi2, vi3);
const __m128 vsum45 = _mm_add_ps(vi4, vi5);
const __m128 vsum016 = _mm_add_ps(vsum01, vi6);
const __m128 vsum2345 = _mm_add_ps(vsum23, vsum45);
const __m128 vsum = _mm_add_ps(vsum016, vsum2345);
__m128 vout = _mm_mul_ps(vsum, vscale);
vout = _mm_max_ps(vout, vmin);
vout = _mm_min_ps(vout, vmax);
if (channels & 2) {
_mm_storel_pi((__m64*) output, vout);
vout = _mm_movehl_ps(vout, vout);
output += 2;
}
if (channels & 1) {
_mm_store_ss(output, vout);
}
}
}
| 3,249 | 25.859504 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool/f32-gavgpool-7x-minmax-wasm-c1.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_f32_gavgpool_minmax_ukernel_7x__wasm_c1(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* output,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const float* i0 = input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
if (rows < 2) {
i1 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
if (rows <= 2) {
i2 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
if (rows < 4) {
i3 = zero;
}
const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
if (rows <= 4) {
i4 = zero;
}
const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
if (rows < 6) {
i5 = zero;
}
const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
if (rows <= 6) {
i6 = zero;
}
const float vscale = params->scalar.scale;
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
const float vi0 = *i0++;
const float vi1 = *i1++;
const float vi2 = *i2++;
const float vi3 = *i3++;
const float vi4 = *i4++;
const float vi5 = *i5++;
const float vi6 = *i6++;
const float vsum01 = vi0 + vi1;
const float vsum23 = vi2 + vi3;
const float vsum45 = vi4 + vi5;
const float vsum016 = vsum01 + vi6;
const float vsum2345 = vsum23 + vsum45;
const float vsum = vsum016 + vsum2345;
float vout = vsum * vscale;
vout = __builtin_wasm_max_f32(vout, vmin);
vout = __builtin_wasm_min_f32(vout, vmax);
*output++ = vout;
} while (--channels != 0);
}
| 1,991 | 24.21519 | 80 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool/f32-gavgpool-7x-minmax-wasmsimd-arm-c4.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
void xnn_f32_gavgpool_minmax_ukernel_7x__wasmsimd_arm_c4(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* output,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const float* i0 = input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
if (rows < 2) {
i1 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
if (rows <= 2) {
i2 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
if (rows < 4) {
i3 = zero;
}
const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
if (rows <= 4) {
i4 = zero;
}
const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
if (rows < 6) {
i5 = zero;
}
const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
if (rows <= 6) {
i6 = zero;
}
  const v128_t vscale = wasm_v128_load32_splat(&params->scalar.scale);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max);
while (channels >= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum016 = wasm_f32x4_add(vsum01, vi6);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum = wasm_f32x4_add(vsum016, vsum2345);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
vout = wasm_f32x4_max(vout, vmin);
vout = wasm_f32x4_min(vout, vmax);
wasm_v128_store(output, vout);
output += 4;
channels -= 4;
}
if (channels != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum016 = wasm_f32x4_add(vsum01, vi6);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum = wasm_f32x4_add(vsum016, vsum2345);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
vout = wasm_f32x4_max(vout, vmin);
vout = wasm_f32x4_min(vout, vmax);
if (channels & 2) {
wasm_v128_store64_lane(output, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
output += 2;
}
if (channels & 1) {
wasm_v128_store32_lane(output, vout, 0);
output += 1;
}
}
}
| 3,454 | 27.319672 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gavgpool/f32-gavgpool-7x-minmax-wasmsimd-x86-c4.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
void xnn_f32_gavgpool_minmax_ukernel_7x__wasmsimd_x86_c4(
size_t rows,
size_t channels,
const float* input,
size_t input_stride,
const float* zero,
float* output,
const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const float* i0 = input;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
if (rows < 2) {
i1 = zero;
}
const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
if (rows <= 2) {
i2 = zero;
}
const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
if (rows < 4) {
i3 = zero;
}
const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
if (rows <= 4) {
i4 = zero;
}
const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
if (rows < 6) {
i5 = zero;
}
const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
if (rows <= 6) {
i6 = zero;
}
  const v128_t vscale = wasm_v128_load32_splat(&params->scalar.scale);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max);
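  // The x86 variant clamps with pmin/pmax, which lower to single minps/maxps instructions; the ARM variant uses the NaN-propagating min/max instead.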
while (channels >= 4) {
const v128_t vi0 = wasm_v128_load(i0);
i0 += 4;
const v128_t vi1 = wasm_v128_load(i1);
i1 += 4;
const v128_t vi2 = wasm_v128_load(i2);
i2 += 4;
const v128_t vi3 = wasm_v128_load(i3);
i3 += 4;
const v128_t vi4 = wasm_v128_load(i4);
i4 += 4;
const v128_t vi5 = wasm_v128_load(i5);
i5 += 4;
const v128_t vi6 = wasm_v128_load(i6);
i6 += 4;
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum016 = wasm_f32x4_add(vsum01, vi6);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum = wasm_f32x4_add(vsum016, vsum2345);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
vout = wasm_f32x4_pmax(vmin, vout);
vout = wasm_f32x4_pmin(vmax, vout);
wasm_v128_store(output, vout);
output += 4;
channels -= 4;
}
if (channels != 0) {
const v128_t vi0 = wasm_v128_load(i0);
const v128_t vi1 = wasm_v128_load(i1);
const v128_t vi2 = wasm_v128_load(i2);
const v128_t vi3 = wasm_v128_load(i3);
const v128_t vi4 = wasm_v128_load(i4);
const v128_t vi5 = wasm_v128_load(i5);
const v128_t vi6 = wasm_v128_load(i6);
const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
const v128_t vsum016 = wasm_f32x4_add(vsum01, vi6);
const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);
const v128_t vsum = wasm_f32x4_add(vsum016, vsum2345);
v128_t vout = wasm_f32x4_mul(vsum, vscale);
vout = wasm_f32x4_pmax(vmin, vout);
vout = wasm_f32x4_pmin(vmax, vout);
if (channels & 2) {
wasm_v128_store64_lane(output, vout, 0);
vout = wasm_v64x2_shuffle(vout, vout, 1, 1);
output += 2;
}
if (channels & 1) {
wasm_v128_store32_lane(output, vout, 0);
output += 1;
}
}
}
| 3,458 | 27.352459 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x16-minmax-aarch64-neonfma-lane-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x16__aarch64_neonfma_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
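    // Initialize the 1x16 accumulators from the initial values (bias) packed at the start of w.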
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
float32x4_t vacc0x89AB = vld1q_f32(w); w += 4;
float32x4_t vacc0xCDEF = vld1q_f32(w); w += 4;
size_t k = kc;
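    // Main loop: consume 4 elements of K per iteration, broadcasting one lane of va0 per FMA group.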
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb89ABc0 = vld1q_f32(w); w += 4;
const float32x4_t vbCDEFc0 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc0, vget_low_f32(va0), 0);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc0, vget_low_f32(va0), 0);
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t vb89ABc1 = vld1q_f32(w); w += 4;
const float32x4_t vbCDEFc1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc1, vget_low_f32(va0), 1);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc1, vget_low_f32(va0), 1);
const float32x4_t vb0123c2 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c2 = vld1q_f32(w); w += 4;
const float32x4_t vb89ABc2 = vld1q_f32(w); w += 4;
const float32x4_t vbCDEFc2 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc2, vget_high_f32(va0), 0);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc2, vget_high_f32(va0), 0);
const float32x4_t vb0123c3 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c3 = vld1q_f32(w); w += 4;
const float32x4_t vb89ABc3 = vld1q_f32(w); w += 4;
const float32x4_t vbCDEFc3 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc0x89AB = vfmaq_lane_f32(vacc0x89AB, vb89ABc3, vget_high_f32(va0), 1);
vacc0xCDEF = vfmaq_lane_f32(vacc0xCDEF, vbCDEFc3, vget_high_f32(va0), 1);
}
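    // Handle a K remainder of 1-3 elements, one element at a time.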
if XNN_UNLIKELY(k != 0) {
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
const float32x4_t vb89AB = vld1q_f32(w); w += 4;
const float32x4_t vbCDEF = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc0x89AB = vfmaq_f32(vacc0x89AB, va0, vb89AB);
vacc0xCDEF = vfmaq_f32(vacc0xCDEF, va0, vbCDEF);
k -= sizeof(float);
} while (k != 0);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc0x89AB = vminq_f32(vacc0x89AB, vmax);
vacc0xCDEF = vminq_f32(vacc0xCDEF, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc0x89AB = vmaxq_f32(vacc0x89AB, vmin);
vacc0xCDEF = vmaxq_f32(vacc0xCDEF, vmin);
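    // Store a full 16-wide row of C, or fall through to the N remainder path with progressively smaller stores.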
if XNN_LIKELY(nc >= 16) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
vst1q_f32(c0 + 8, vacc0x89AB);
vst1q_f32(c0 + 12, vacc0xCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vst1q_f32(c0, vacc0x4567); c0 += 4;
vacc0x0123 = vacc0x89AB;
vacc0x4567 = vacc0xCDEF;
}
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
vacc0x4567 = vacc0x89AB;
vacc0x89AB = vacc0xCDEF;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,509 | 33.012346 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x16-minmax-avx-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x16__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m256 vacc0x01234567 = _mm256_load_ps(w + 0);
__m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
size_t k = kc;
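    // Broadcast one A element per iteration and multiply-add it against 16 packed B values.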
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c0, vacc0x01234567);
vacc0x01234567 = vacc0x89ABCDEF;
c0 += 8;
}
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 2,667 | 23.703704 | 85 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x16-minmax-avx512f-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_gemm_minmax_ukernel_1x16__avx512f_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
w += 16;
size_t k = kc;
do {
const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
w += 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
a0 += 1;
k -= sizeof(float);
} while (k != 0);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 2,124 | 24.297619 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x16-minmax-fma3-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x16__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m256 vacc0x01234567 = _mm256_load_ps(w + 0);
__m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c0, vacc0x01234567);
vacc0x01234567 = vacc0x89ABCDEF;
c0 += 8;
}
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 2,642 | 23.472222 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x4-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_minmax_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
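  // Outer loop over groups of 4 output columns; the inner loop accumulates along K with scalar multiply-adds.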
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
w += 4;
size_t k = kc;
do {
const float va0 = *a0++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
k -= sizeof(float);
} while (k != 0);
vacc00 = math_max_f32(vacc00, vmin);
vacc01 = math_max_f32(vacc01, vmin);
vacc02 = math_max_f32(vacc02, vmin);
vacc03 = math_max_f32(vacc03, vmin);
vacc00 = math_min_f32(vacc00, vmax);
vacc01 = math_min_f32(vacc01, vmax);
vacc02 = math_min_f32(vacc02, vmax);
vacc03 = math_min_f32(vacc03, vmax);
if XNN_LIKELY(nc >= 4) {
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 2,315 | 21.705882 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x4-minmax-wasm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_minmax_ukernel_1x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
w += 4;
size_t k = kc;
do {
const float va0 = *a0++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
k -= sizeof(float);
} while (k != 0);
vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
if XNN_LIKELY(nc >= 4) {
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 2,393 | 22.470588 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x4-relu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_relu_ukernel_1x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
w += 4;
size_t k = kc;
do {
const float va0 = *a0++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
k -= sizeof(float);
} while (k != 0);
vacc00 = math_max_f32(vacc00, 0.0f);
vacc01 = math_max_f32(vacc01, 0.0f);
vacc02 = math_max_f32(vacc02, 0.0f);
vacc03 = math_max_f32(vacc03, 0.0f);
if XNN_LIKELY(nc >= 4) {
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 2,064 | 20.736842 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x4-relu-wasm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_relu_ukernel_1x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
w += 4;
size_t k = kc;
do {
const float va0 = *a0++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
k -= sizeof(float);
} while (k != 0);
vacc00 = __builtin_wasm_max_f32(vacc00, 0.0f);
vacc01 = __builtin_wasm_max_f32(vacc01, 0.0f);
vacc02 = __builtin_wasm_max_f32(vacc02, 0.0f);
vacc03 = __builtin_wasm_max_f32(vacc03, 0.0f);
if XNN_LIKELY(nc >= 4) {
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 2,102 | 21.136842 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-aarch64-neonfma-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
size_t k = kc;
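    // Main loop: 2 elements of K per iteration using lane-indexed FMA.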
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 2,888 | 26.254717 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-avx-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m256 vacc0x01234567 = _mm256_load_ps(w + 0);
w += 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 2,172 | 21.873684 | 85 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-fma3-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m256 vacc0x01234567 = _mm256_load_ps(w + 0);
w += 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
w += 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 2,160 | 21.747368 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-neon-dup-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__neon_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 2,964 | 26.453704 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-neon-lane-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 2,877 | 26.150943 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-neonfma-dup-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__neonfma_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t vb0123 = vld1q_f32(w); w += 4;
const float32x4_t vb4567 = vld1q_f32(w); w += 4;
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c0, vacc0x01); c0 += 2;
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 2,967 | 26.481481 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-sse-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__sse_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
w += 8;
size_t k = kc;
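    // Process 4 K elements per iteration; each element of va0 is broadcast with a shuffle before the multiply-add.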
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 3,830 | 25.978873 | 80 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-sse-load1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__sse_load1(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
w += 8;
size_t k = kc;
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
k -= sizeof(float);
} while (k != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 2,289 | 21.9 | 75 | c |
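Both SSE variants above ("dup", which loads four A values and broadcasts each lane with a shuffle, and "load1", which broadcasts one A value per step) stream through the same packed-weight layout: 8 biases, then the 8 weights of every reduction step. A minimal packing sketch under that assumption follows; the layout is inferred from the w offsets in these kernels, and the helper name, bias pointer, and ldb row stride are illustrative, not XNNPACK's own packing code.

#include <stddef.h>

/* Sketch: pack one 8-column panel of a K x N row-major B (N >= 8) plus its
 * biases into the stream the 1x8 kernels above read through w:
 *   bias[0..7], then for each k: B[k][0..7]. */
static void pack_b_1x8_panel(size_t K, size_t ldb,
                             const float* bias, const float* b, float* w)
{
  for (size_t n = 0; n < 8; n++) {
    *w++ = bias[n];
  }
  for (size_t k = 0; k < K; k++) {
    for (size_t n = 0; n < 8; n++) {
      *w++ = b[k * ldb + n];
    }
  }
}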
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-sse2-dup.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__sse2_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 3,933 | 26.704225 | 114 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-wasmrelaxedsimd-fma-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 2,514 | 24.15 | 78 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-wasmrelaxedsimd-fma-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,088 | 28.846715 | 82 | c |
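The "loadsplat" kernels broadcast one A element per reduction step, while the "splat" kernels load four A elements at once and broadcast each lane with a shuffle; __builtin_wasm_relaxed_madd_f32x4 is a (possibly fused) multiply-add. A scalar model of the two inner-loop shapes, showing that they accumulate the same sums (sketch only; fmaf stands in for the relaxed madd):

#include <math.h>

/* One k step of the "loadsplat" shape: broadcast a single a value across the
 * 8 output columns, mirroring relaxed_madd(va0, vb, vacc). */
static void step_loadsplat(float acc[8], float a_k, const float b_k[8]) {
  for (int n = 0; n < 8; n++) {
    acc[n] = fmaf(a_k, b_k[n], acc[n]);
  }
}

/* Four k steps of the "splat" shape: a[k..k+3] is loaded once and each lane is
 * reused in turn (va0c0..va0c3 in the kernels above); the arithmetic matches
 * four consecutive loadsplat steps. */
static void step4_splat(float acc[8], const float a4[4], const float b4[4][8]) {
  for (int c = 0; c < 4; c++) {
    step_loadsplat(acc, a4[c], b4[c]);
  }
}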
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-wasmrelaxedsimd-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__wasmrelaxedsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 2,504 | 24.05 | 75 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-wasmrelaxedsimd-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__wasmrelaxedsimd_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,054 | 28.59854 | 79 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-wasmsimd-arm-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 2,429 | 23.3 | 75 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-wasmsimd-arm-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,979 | 28.051095 | 79 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-wasmsimd-x86-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 2,433 | 23.34 | 75 | c |
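The "arm" variants above clamp with wasm_f32x4_min/max, while the "x86" variants use the pseudo-minimum/maximum wasm_f32x4_pmin/pmax, which map more directly onto x86's minps/maxps and therefore lower to fewer instructions there. The results differ only in NaN and signed-zero corner cases. Scalar models of the two lane-wise flavors (a sketch of the behaviour as I understand it, signed zeros ignored, not the WebAssembly specification text):

#include <math.h>

/* f32x4.min-style lane op: propagates NaN (either NaN operand yields NaN). */
static float lane_min(float a, float b) {
  if (isnan(a) || isnan(b)) return NAN;
  return a < b ? a : b;
}

/* f32x4.pmin-style lane op: simply "b < a ? b : a", so a NaN or a tie falls
 * through to a; this is roughly x86 minps behaviour up to operand order. */
static float lane_pmin(float a, float b) {
  return b < a ? b : a;
}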
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-minmax-wasmsimd-x86-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,983 | 28.080292 | 79 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-relu-wasmrelaxedsimd-fma-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_1x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
k -= sizeof(float);
} while (k != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 2,253 | 22.479167 | 78 | c |
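The relu variants clamp at zero with an integer maximum (wasm_i32x4_max against an all-zero vector) rather than a floating-point max: every negative IEEE-754 single-precision value has its sign bit set and therefore reinterprets as a negative 32-bit integer, so a signed-integer max with 0 zeroes exactly the negative lanes. A scalar sketch of the trick (illustration only, not XNNPACK code):

#include <stdint.h>
#include <string.h>

/* ReLU via the signed-integer max trick used by the relu kernels above:
 * negative floats reinterpret as negative int32 values, so max(bits, 0)
 * replaces them with +0.0f and leaves non-negative floats untouched. */
static float relu_via_i32_max(float x) {
  int32_t bits;
  memcpy(&bits, &x, sizeof(bits));
  if (bits < 0) {
    bits = 0;  /* mirrors wasm_i32x4_max(vacc, wasm_i32x4_const_splat(0)) */
  }
  float y;
  memcpy(&y, &bits, sizeof(y));
  return y;
}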
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-relu-wasmrelaxedsimd-fma-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_1x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
k -= sizeof(float);
} while (k != 0);
}
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,827 | 27.781955 | 82 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-relu-wasmsimd-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
k -= sizeof(float);
} while (k != 0);
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 2,236 | 22.302083 | 75 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-relu-wasmsimd-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
k -= sizeof(float);
} while (k != 0);
}
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,786 | 27.473684 | 79 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-wasmrelaxedsimd-fma-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_1x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
k -= sizeof(float);
} while (k != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 2,095 | 21.537634 | 78 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-wasmrelaxedsimd-fma-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_1x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
k -= sizeof(float);
} while (k != 0);
}
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,669 | 27.230769 | 82 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-wasmsimd-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_1x8__wasmsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
k -= sizeof(float);
} while (k != 0);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 2,078 | 21.354839 | 76 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8-wasmsimd-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_1x8__wasmsimd_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
k -= sizeof(float);
} while (k != 0);
}
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,628 | 26.915385 | 79 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8s4-minmax-sse.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8s4__sse(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
w += 32;
}
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,001 | 31.064103 | 126 | c |
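The "s4" suffix means the loaded A vector is rotated by one lane between the four weight panels of each reduction block (the _MM_SHUFFLE(0, 3, 2, 1) shuffles above), with the weights pre-packed to match that rotation by the corresponding packing routine, which is not shown here. In the remainder path the kernel may read past the end of A (hence XNN_OOB_READS) and relies on zero-padded weights; the _mm_andnot_ps(_mm_cmpeq_ps(0, vb), va) sequence forces the A lane to zero wherever the weight is a zero pad, so a garbage (possibly NaN) lane cannot poison the accumulator. A scalar model of that masked multiply-add (sketch only):

/* Scalar model of the masked remainder product in the s4 kernels: when the
 * packed weight is a zero pad, the (possibly out-of-bounds, possibly NaN)
 * a lane is forced to 0 first, so the product is 0*0 = 0 rather than NaN*0. */
static float masked_madd(float acc, float a_maybe_oob, float b) {
  const float a = (b == 0.0f) ? 0.0f : a_maybe_oob;
  return acc + a * b;
}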
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8s4-minmax-wasmrelaxedsimd-fma.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
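      // s4 ("shift by 4") scheme: one 4-float load of A covers four K steps; the activation
      // vector is rotated by one lane between steps, and the weights are assumed pre-packed
      // in the matching rotated order.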
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
w += 32;
}
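    // Relaxed min/max: NaN and signed-zero handling is implementation-defined, in exchange
    // for a cheaper lowering on the host instruction set.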
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,342 | 31.779141 | 130 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8s4-minmax-wasmrelaxedsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8s4__wasmrelaxedsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
w += 32;
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,290 | 31.460123 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8s4-minmax-wasmsimd-arm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8s4__wasmsimd_arm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
w += 32;
}
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,215 | 31 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8s4-minmax-wasmsimd-x86.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_1x8s4__wasmsimd_x86(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
w += 32;
}
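    // pmin/pmax ("pseudo" minimum/maximum) pick an operand by a single comparison instead of
    // IEEE min/max semantics, so they lower to one minps/maxps on x86; the _arm variant of
    // this kernel keeps the NaN-propagating wasm_f32x4_min/max instead.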
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,219 | 31.02454 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8s4-relu-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_1x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
w += 32;
}
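    // ReLU via signed-integer max: any float with the sign bit set (including -0.0f) compares
    // below integer 0, so the max replaces it with +0.0f; non-negative floats pass through.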
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,081 | 30.962264 | 130 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8s4-relu-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_relu_ukernel_1x8s4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
w += 32;
}
const v128_t vzero = wasm_i32x4_const_splat(0);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,022 | 30.591195 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8s4-wasmrelaxedsimd-fma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_1x8s4__wasmrelaxedsimd_fma(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
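      // relaxed_madd may be evaluated fused (single rounding) or as a separate multiply and
      // add, so low-order bits of the result can differ between hosts.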
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c0, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c0, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c1, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c1, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c2, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c2, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123c3, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567c3, vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2, vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3, vacc0x4567);
w += 32;
}
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,923 | 30.564103 | 130 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-1x8s4-wasmsimd.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_ukernel_1x8s4__wasmsimd(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
w += 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c0), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c1), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c2), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567c3), vacc0x4567);
w += 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
v128_t va0 = wasm_v128_load(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
const v128_t vzero = wasm_f32x4_const_splat(0.0f);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2), vacc0x4567);
va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3), vacc0x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3), vacc0x4567);
w += 32;
}
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c0, vacc0x0123);
vacc0x0123 = vacc0x4567;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,864 | 30.185897 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-2x4-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_minmax_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
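  // With mr == 1 the second row aliases the first: the same values are computed twice and
  // written to the same location, keeping the inner loops branch-free.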
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
w += 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
k -= sizeof(float);
} while (k != 0);
vacc00 = math_max_f32(vacc00, vmin);
vacc01 = math_max_f32(vacc01, vmin);
vacc02 = math_max_f32(vacc02, vmin);
vacc03 = math_max_f32(vacc03, vmin);
vacc10 = math_max_f32(vacc10, vmin);
vacc11 = math_max_f32(vacc11, vmin);
vacc12 = math_max_f32(vacc12, vmin);
vacc13 = math_max_f32(vacc13, vmin);
vacc00 = math_min_f32(vacc00, vmax);
vacc01 = math_min_f32(vacc01, vmax);
vacc02 = math_min_f32(vacc02, vmax);
vacc03 = math_min_f32(vacc03, vmax);
vacc10 = math_min_f32(vacc10, vmax);
vacc11 = math_min_f32(vacc11, vmax);
vacc12 = math_min_f32(vacc12, vmax);
vacc13 = math_min_f32(vacc13, vmax);
if XNN_LIKELY(nc >= 4) {
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 3,463 | 24.470588 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-2x4-minmax-wasm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_minmax_ukernel_2x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
w += 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
k -= sizeof(float);
} while (k != 0);
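    // Clamp using the wasm f32.max/f32.min instructions via clang builtins.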
vacc00 = __builtin_wasm_max_f32(vacc00, vmin);
vacc01 = __builtin_wasm_max_f32(vacc01, vmin);
vacc02 = __builtin_wasm_max_f32(vacc02, vmin);
vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
vacc10 = __builtin_wasm_max_f32(vacc10, vmin);
vacc11 = __builtin_wasm_max_f32(vacc11, vmin);
vacc12 = __builtin_wasm_max_f32(vacc12, vmin);
vacc13 = __builtin_wasm_max_f32(vacc13, vmin);
vacc00 = __builtin_wasm_min_f32(vacc00, vmax);
vacc01 = __builtin_wasm_min_f32(vacc01, vmax);
vacc02 = __builtin_wasm_min_f32(vacc02, vmax);
vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
vacc10 = __builtin_wasm_min_f32(vacc10, vmax);
vacc11 = __builtin_wasm_min_f32(vacc11, vmax);
vacc12 = __builtin_wasm_min_f32(vacc12, vmax);
vacc13 = __builtin_wasm_min_f32(vacc13, vmax);
if XNN_LIKELY(nc >= 4) {
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 3,621 | 25.632353 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-2x4-relu-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_relu_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
w += 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
k -= sizeof(float);
} while (k != 0);
vacc00 = math_max_f32(vacc00, 0.0f);
vacc01 = math_max_f32(vacc01, 0.0f);
vacc02 = math_max_f32(vacc02, 0.0f);
vacc03 = math_max_f32(vacc03, 0.0f);
vacc10 = math_max_f32(vacc10, 0.0f);
vacc11 = math_max_f32(vacc11, 0.0f);
vacc12 = math_max_f32(vacc12, 0.0f);
vacc13 = math_max_f32(vacc13, 0.0f);
if XNN_LIKELY(nc >= 4) {
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 3,048 | 23.392 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-2x4-relu-wasm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_relu_ukernel_2x4__wasm(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
w += 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
k -= sizeof(float);
} while (k != 0);
vacc00 = __builtin_wasm_max_f32(vacc00, 0.0f);
vacc01 = __builtin_wasm_max_f32(vacc01, 0.0f);
vacc02 = __builtin_wasm_max_f32(vacc02, 0.0f);
vacc03 = __builtin_wasm_max_f32(vacc03, 0.0f);
vacc10 = __builtin_wasm_max_f32(vacc10, 0.0f);
vacc11 = __builtin_wasm_max_f32(vacc11, 0.0f);
vacc12 = __builtin_wasm_max_f32(vacc12, 0.0f);
vacc13 = __builtin_wasm_max_f32(vacc13, 0.0f);
if XNN_LIKELY(nc >= 4) {
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 3,126 | 24.016 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-2x4-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_f32_gemm_ukernel_2x4__scalar(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
do {
float vacc00 = w[0];
float vacc01 = w[1];
float vacc02 = w[2];
float vacc03 = w[3];
w += 4;
float vacc10 = vacc00;
float vacc11 = vacc01;
float vacc12 = vacc02;
float vacc13 = vacc03;
size_t k = kc;
do {
const float va0 = *a0++;
const float va1 = *a1++;
const float vb0 = w[0];
const float vb1 = w[1];
const float vb2 = w[2];
const float vb3 = w[3];
w += 4;
vacc00 = math_muladd_f32(va0, vb0, vacc00);
vacc01 = math_muladd_f32(va0, vb1, vacc01);
vacc02 = math_muladd_f32(va0, vb2, vacc02);
vacc03 = math_muladd_f32(va0, vb3, vacc03);
vacc10 = math_muladd_f32(va1, vb0, vacc10);
vacc11 = math_muladd_f32(va1, vb1, vacc11);
vacc12 = math_muladd_f32(va1, vb2, vacc12);
vacc13 = math_muladd_f32(va1, vb3, vacc13);
k -= sizeof(float);
} while (k != 0);
if XNN_LIKELY(nc >= 4) {
c1[0] = vacc10;
c1[1] = vacc11;
c1[2] = vacc12;
c1[3] = vacc13;
c1 = (float*) ((uintptr_t) c1 + cn_stride);
c0[0] = vacc00;
c0[1] = vacc01;
c0[2] = vacc02;
c0[3] = vacc03;
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a1 = (const void*) ((uintptr_t) a1 - kc);
a0 = (const void*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = vacc10;
c1[1] = vacc11;
vacc10 = vacc12;
c1 += 2;
c0[0] = vacc00;
c0[1] = vacc01;
vacc00 = vacc02;
c0 += 2;
}
if (nc & 1) {
c1[0] = vacc10;
c0[0] = vacc00;
}
nc = 0;
}
} while (nc != 0);
}
| 2,718 | 22.239316 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x16-minmax-avx-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x16__avx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
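    // The packed weights begin with the 16 bias values; rows 1 and 2 start from the same
    // bias-initialized accumulators as row 0.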
__m256 vacc0x01234567 = _mm256_load_ps(w + 0);
__m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
w += 16;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
k -= sizeof(float);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
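      // Output tail: peel off 8, 4, 2 and 1 columns, narrowing from 256-bit to 128-bit lanes.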
if (nc & 8) {
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 5,428 | 30.935294 | 85 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x16-minmax-fma3-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x16__fma3_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
__m256 vacc0x01234567 = _mm256_load_ps(w + 0);
__m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
w += 16;
size_t k = kc;
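    // Main K loop: broadcast one element from each row of A and FMA it against 16 packed weights.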
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 vb01234567 = _mm256_load_ps(w);
const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
w += 16;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
k -= sizeof(float);
} while (k != 0);
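    // Clamp the accumulators to the [min, max] range supplied via params.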
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
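    // Store a full 3x16 tile, or handle the remaining columns below.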
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
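// Illustrative usage sketch (editorial addition, not part of the generated file):
// assuming K floats per row, byte-valued strides, and weights already packed for
// this microkernel, a single call produces one 3-row x 16-column tile of C.
// The names k, a_stride, cm_stride, cn_stride, packed_w and minmax_params are
// hypothetical placeholders.
//
//   xnn_f32_gemm_minmax_ukernel_3x16__fma3_broadcast(
//       /*mr=*/3, /*nc=*/16, /*kc=*/k * sizeof(float),
//       a, a_stride, packed_w, c, cm_stride, cn_stride, &minmax_params);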
| 5,351 | 30.482353 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-minmax-sse-dup.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8__sse_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
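    // Unrolled main loop: consume 4 K elements per iteration, duplicating each lane of A in turn.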
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va0c0000 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va1c0000 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 va2c0000 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 0, 0, 0));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
const __m128 va0c1111 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va1c1111 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 va2c1111 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(1, 1, 1, 1));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
const __m128 va0c2222 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va1c2222 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 va2c2222 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(2, 2, 2, 2));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
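    // Remainder loop: handle the last 1-3 K elements one at a time.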
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
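    // Store a full 3x8 tile, or handle the remaining columns below.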
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,937 | 34.4375 | 80 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-minmax-sse-load1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8__sse_load1(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
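    // Accumulate over K: broadcast one element per row of A and multiply by 8 packed weights.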
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
k -= sizeof(float);
} while (k != 0);
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
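    // Store a full 3x8 tile, or handle the remaining columns below.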
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 4,416 | 27.681818 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-minmax-sse2-dup.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8__sse2_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
__m128 vacc0x0123 = _mm_load_ps(w + 0);
__m128 vacc0x4567 = _mm_load_ps(w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
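    // Unrolled main loop: consume 4 K elements per iteration, using integer shuffles to duplicate lanes of A.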
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 vb0123c0 = _mm_load_ps(w + 0);
const __m128 vb4567c0 = _mm_load_ps(w + 4);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 vb0123c1 = _mm_load_ps(w + 8);
const __m128 vb4567c1 = _mm_load_ps(w + 12);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 vb0123c2 = _mm_load_ps(w + 16);
const __m128 vb4567c2 = _mm_load_ps(w + 20);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 vb0123c3 = _mm_load_ps(w + 24);
const __m128 vb4567c3 = _mm_load_ps(w + 28);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
w += 32;
k -= 4 * sizeof(float);
}
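    // Remainder loop: handle the last 1-3 K elements one at a time.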
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 vb0123 = _mm_load_ps(w);
const __m128 vb4567 = _mm_load_ps(w + 4);
w += 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
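    // Store a full 3x8 tile, or handle the remaining columns below.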
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 8,244 | 35.808036 | 114 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-minmax-wasmrelaxedsimd-fma-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8__wasmrelaxedsimd_fma_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
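    // Accumulate over K: splat one element per row of A and fused-multiply-add it against 8 packed weights.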
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
k -= sizeof(float);
} while (k != 0);
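    // Clamp the accumulators to the [min, max] output range.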
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
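    // Store a full 3x8 tile, or handle the remaining columns below.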
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,947 | 31.12987 | 78 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-minmax-wasmrelaxedsimd-fma-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8__wasmrelaxedsimd_fma_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
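    // Unrolled main loop: consume 4 K elements per iteration, splatting each lane of A in turn.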
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb0123c0, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb0123c0, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb0123c0, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c0, vb4567c0, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c0, vb4567c0, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c0, vb4567c0, vacc2x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb0123c1, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb0123c1, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb0123c1, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c1, vb4567c1, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c1, vb4567c1, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c1, vb4567c1, vacc2x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb0123c2, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb0123c2, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb0123c2, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c2, vb4567c2, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c2, vb4567c2, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c2, vb4567c2, vacc2x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb0123c3, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb0123c3, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb0123c3, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0c3, vb4567c3, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1c3, vb4567c3, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2c3, vb4567c3, vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
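    // Remainder loop: handle the last 1-3 K elements one at a time.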
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = __builtin_wasm_relaxed_madd_f32x4(va0, vb0123, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_madd_f32x4(va1, vb0123, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_madd_f32x4(va2, vb0123, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_madd_f32x4(va0, vb4567, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_madd_f32x4(va1, vb4567, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_madd_f32x4(va2, vb4567, vacc2x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
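    // Store a full 3x8 tile, or handle the remaining columns below.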
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,537 | 37.986301 | 82 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-minmax-wasmrelaxedsimd-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8__wasmrelaxedsimd_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
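    // Accumulate over K: splat one element per row of A and multiply-add it against 8 packed weights.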
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
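    // Store a full 3x8 tile, or handle the remaining columns below.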
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,925 | 30.987013 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-minmax-wasmrelaxedsimd-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8__wasmrelaxedsimd_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
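    // Unrolled main loop: consume 4 K elements per iteration, splatting each lane of A in turn.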
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
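    // Remainder loop: handle the last 1-3 K elements one at a time.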
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
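    // Store a full 3x8 tile, or handle the remaining columns below.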
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,443 | 37.557078 | 79 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-minmax-wasmsimd-arm-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
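    // Accumulate over K: splat one element per row of A and multiply-add it against 8 packed weights.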
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
k -= sizeof(float);
} while (k != 0);
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
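    // Store a full 3x8 tile, or handle the remaining columns below.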
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,706 | 29.564935 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-minmax-wasmsimd-arm-splat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
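    // Unrolled main loop: consume 4 K elements per iteration, splatting each lane of A in turn.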
while (k >= 4 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(a0);
a0 += 4;
const v128_t va1 = wasm_v128_load(a1);
a1 += 4;
const v128_t va2 = wasm_v128_load(a2);
a2 += 4;
const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);
const v128_t vb0123c0 = wasm_v128_load(w + 0);
const v128_t vb4567c0 = wasm_v128_load(w + 4);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb0123c0), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb0123c0), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb0123c0), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c0, vb4567c0), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c0, vb4567c0), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c0, vb4567c0), vacc2x4567);
const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);
const v128_t vb0123c1 = wasm_v128_load(w + 8);
const v128_t vb4567c1 = wasm_v128_load(w + 12);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb0123c1), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb0123c1), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb0123c1), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c1, vb4567c1), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c1, vb4567c1), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c1, vb4567c1), vacc2x4567);
const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);
const v128_t vb0123c2 = wasm_v128_load(w + 16);
const v128_t vb4567c2 = wasm_v128_load(w + 20);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb0123c2), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb0123c2), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb0123c2), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c2, vb4567c2), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c2, vb4567c2), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c2, vb4567c2), vacc2x4567);
const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);
const v128_t vb0123c3 = wasm_v128_load(w + 24);
const v128_t vb4567c3 = wasm_v128_load(w + 28);
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb0123c3), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb0123c3), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb0123c3), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0c3, vb4567c3), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1c3, vb4567c3), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2c3, vb4567c3), vacc2x4567);
w += 32;
k -= 4 * sizeof(float);
}
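    // Remainder loop: handle the last 1-3 K elements one at a time.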
if XNN_UNLIKELY(k != 0) {
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb0123), vacc0x0123);
vacc1x0123 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb0123), vacc1x0123);
vacc2x0123 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb0123), vacc2x0123);
vacc0x4567 = wasm_f32x4_add(wasm_f32x4_mul(va0, vb4567), vacc0x4567);
vacc1x4567 = wasm_f32x4_add(wasm_f32x4_mul(va1, vb4567), vacc1x4567);
vacc2x4567 = wasm_f32x4_add(wasm_f32x4_mul(va2, vb4567), vacc2x4567);
k -= sizeof(float);
} while (k != 0);
}
vacc0x0123 = wasm_f32x4_max(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_max(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_max(vmin, vacc2x0123);
vacc0x4567 = wasm_f32x4_max(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_max(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_max(vmin, vacc2x4567);
vacc0x0123 = wasm_f32x4_min(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_min(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_min(vmax, vacc2x0123);
vacc0x4567 = wasm_f32x4_min(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_min(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_min(vmax, vacc2x4567);
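    // Store a full 3x8 tile, or handle the remaining columns below.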
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,224 | 36.557078 | 79 | c |

| XNNPACK | XNNPACK-master/src/f32-gemm/gen/f32-gemm-3x8-minmax-wasmsimd-x86-loadsplat.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gemm.h>
void xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const float* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
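// Per-row A and C pointers; when mr < 3 the unused rows alias the previous row, so the extra loads and stores below stay in bounds.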
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
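// Each pass over this loop computes one 3x8 tile of C; accumulators start from the packed bias at the front of w.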
do {
v128_t vacc0x0123 = wasm_v128_load(w + 0);
v128_t vacc0x4567 = wasm_v128_load(w + 4);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc1x4567 = vacc0x4567;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc2x4567 = vacc0x4567;
w += 8;
size_t k = kc;
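// loadsplat: every iteration broadcasts a single A element per row (v128.load32_splat) and multiplies it by one 8-wide slice of packed B.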
do {
const v128_t va0 = wasm_v128_load32_splat(a0);
a0 += 1;
const v128_t va1 = wasm_v128_load32_splat(a1);
a1 += 1;
const v128_t va2 = wasm_v128_load32_splat(a2);
a2 += 1;
const v128_t vb0123 = wasm_v128_load(w);
const v128_t vb4567 = wasm_v128_load(w + 4);
w += 8;
vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
k -= sizeof(float);
} while (k != 0);
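// Clamp with wasm_f32x4_pmin/pmax, which lower to plain min/max instructions on x86 (hence the _x86_ suffix in the kernel name).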
vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);
vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);
if XNN_LIKELY(nc >= 8) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
wasm_v128_store(c0, vacc0x0123);
wasm_v128_store(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
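// The K loop advanced a0-a2 by kc bytes; rewind them so the same A rows are reused for the next 8-column block.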
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
wasm_v128_store(c2, vacc2x0123);
wasm_v128_store(c1, vacc1x0123);
wasm_v128_store(c0, vacc0x0123);
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
wasm_v128_store64_lane(c2, vacc2x0123, 0);
wasm_v128_store64_lane(c1, vacc1x0123, 0);
wasm_v128_store64_lane(c0, vacc0x0123, 0);
vacc2x0123 = wasm_v64x2_shuffle(vacc2x0123, vacc2x0123, 1, 1);
vacc1x0123 = wasm_v64x2_shuffle(vacc1x0123, vacc1x0123, 1, 1);
vacc0x0123 = wasm_v64x2_shuffle(vacc0x0123, vacc0x0123, 1, 1);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
wasm_v128_store32_lane(c2, vacc2x0123, 0);
wasm_v128_store32_lane(c1, vacc1x0123, 0);
wasm_v128_store32_lane(c0, vacc0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,718 | 29.642857 | 75 | c |