repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-neon-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts `batch` bytes worth of float inputs to quantized uint8 (QU8)
// outputs with NEON intrinsics, 16 elements per main-loop iteration.
//
// Rounding uses the "magic bias" trick: after multiplying by the scale and
// adding a large bias, the rounded integer is recovered directly from the
// float's bit pattern (vreinterpretq + saturating subtract of
// magic_bias_less_zero_point, which also folds in the output zero point).
// The partial-batch tail intentionally loads full 4-float vectors past the
// valid data; this is sanctioned by the XNN_OOB_READS attribute.
void xnn_f32_qu8_vcvt_ukernel__neon_x16(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Broadcast per-tensor quantization parameters into vector registers.
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon.magic_bias);
  const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon.magic_bias_less_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);

  // Main loop: 16 floats -> 16 uint8 per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    float32x4_t vx0123 = vld1q_f32(input); input += 4;
    float32x4_t vx4567 = vld1q_f32(input); input += 4;
    float32x4_t vx89AB = vld1q_f32(input); input += 4;
    float32x4_t vxCDEF = vld1q_f32(input); input += 4;

    vx0123 = vmulq_f32(vx0123, vscale);
    vx4567 = vmulq_f32(vx4567, vscale);
    vx89AB = vmulq_f32(vx89AB, vscale);
    vxCDEF = vmulq_f32(vxCDEF, vscale);

    vx0123 = vaddq_f32(vx0123, vmagic_bias);
    vx4567 = vaddq_f32(vx4567, vmagic_bias);
    vx89AB = vaddq_f32(vx89AB, vmagic_bias);
    vxCDEF = vaddq_f32(vxCDEF, vmagic_bias);

    // Recover rounded integers from the biased floats' bit patterns.
    const int32x4_t vacc0123 = vqsubq_s32(vreinterpretq_s32_f32(vx0123), vmagic_bias_less_zero_point);
    const int32x4_t vacc4567 = vqsubq_s32(vreinterpretq_s32_f32(vx4567), vmagic_bias_less_zero_point);
    const int32x4_t vacc89AB = vqsubq_s32(vreinterpretq_s32_f32(vx89AB), vmagic_bias_less_zero_point);
    const int32x4_t vaccCDEF = vqsubq_s32(vreinterpretq_s32_f32(vxCDEF), vmagic_bias_less_zero_point);

    // Narrow 32 -> 16 -> 8 bits with saturation, then clamp to [min, max].
    const int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
    const int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));

    uint8x16_t vy0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    vy0123456789ABCDEF = vmaxq_u8(vy0123456789ABCDEF, voutput_min);
    vy0123456789ABCDEF = vminq_u8(vy0123456789ABCDEF, voutput_max);
    vst1q_u8(output, vy0123456789ABCDEF); output += 16;
  }
  // Secondary loop: 8 floats -> 8 uint8 per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    float32x4_t vx_lo = vld1q_f32(input); input += 4;
    float32x4_t vx_hi = vld1q_f32(input); input += 4;

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    vx_lo = vaddq_f32(vx_lo, vmagic_bias);
    vx_hi = vaddq_f32(vx_hi, vmagic_bias);

    const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
    const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);

    const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, vget_low_u8(voutput_min));
    vy = vmin_u8(vy, vget_low_u8(voutput_max));
    vst1_u8(output, vy); output += 8;
  }
  // Tail: 1-7 remaining elements; over-reads inputs, writes exactly `batch` outputs.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    float32x4_t vx_lo = vld1q_f32(input);
    // When >= 4 elements remain, the high half starts 4 floats further in;
    // otherwise it aliases the low half (its lanes are discarded below).
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    float32x4_t vx_hi = vld1q_f32(x_hi);

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    vx_lo = vaddq_f32(vx_lo, vmagic_bias);
    vx_hi = vaddq_f32(vx_hi, vmagic_bias);

    const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
    const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);

    const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, vget_low_u8(voutput_min));
    vy = vmin_u8(vy, vget_low_u8(voutput_max));

    // Store 4, then 2, then 1 bytes as needed, rotating consumed lanes out.
    if (batch & (4 * sizeof(float))) {
      vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
      vy = vext_u8(vy, vy, 4);
    }
    if (batch & (2 * sizeof(float))) {
      vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
      vy = vext_u8(vy, vy, 2);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_u8(output, vy, 0);
    }
  }
}
| 4,721 | 37.704918 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-neon-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts `batch` bytes worth of float inputs to quantized uint8 (QU8)
// outputs with NEON intrinsics, 24 elements per main-loop iteration.
//
// Rounding uses the "magic bias" trick: after multiplying by the scale and
// adding a large bias, the rounded integer is recovered directly from the
// float's bit pattern (vreinterpretq + saturating subtract of
// magic_bias_less_zero_point, which also folds in the output zero point).
// The partial-batch tail intentionally loads full 4-float vectors past the
// valid data; this is sanctioned by the XNN_OOB_READS attribute.
void xnn_f32_qu8_vcvt_ukernel__neon_x24(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Broadcast per-tensor quantization parameters into vector registers.
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon.magic_bias);
  const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon.magic_bias_less_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);

  // Main loop: 24 floats -> 24 uint8 per iteration.
  for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
    float32x4_t vx0123 = vld1q_f32(input); input += 4;
    float32x4_t vx4567 = vld1q_f32(input); input += 4;
    float32x4_t vx89AB = vld1q_f32(input); input += 4;
    float32x4_t vxCDEF = vld1q_f32(input); input += 4;
    float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
    float32x4_t vxKLMN = vld1q_f32(input); input += 4;

    vx0123 = vmulq_f32(vx0123, vscale);
    vx4567 = vmulq_f32(vx4567, vscale);
    vx89AB = vmulq_f32(vx89AB, vscale);
    vxCDEF = vmulq_f32(vxCDEF, vscale);
    vxGHIJ = vmulq_f32(vxGHIJ, vscale);
    vxKLMN = vmulq_f32(vxKLMN, vscale);

    vx0123 = vaddq_f32(vx0123, vmagic_bias);
    vx4567 = vaddq_f32(vx4567, vmagic_bias);
    vx89AB = vaddq_f32(vx89AB, vmagic_bias);
    vxCDEF = vaddq_f32(vxCDEF, vmagic_bias);
    vxGHIJ = vaddq_f32(vxGHIJ, vmagic_bias);
    vxKLMN = vaddq_f32(vxKLMN, vmagic_bias);

    // Recover rounded integers from the biased floats' bit patterns.
    const int32x4_t vacc0123 = vqsubq_s32(vreinterpretq_s32_f32(vx0123), vmagic_bias_less_zero_point);
    const int32x4_t vacc4567 = vqsubq_s32(vreinterpretq_s32_f32(vx4567), vmagic_bias_less_zero_point);
    const int32x4_t vacc89AB = vqsubq_s32(vreinterpretq_s32_f32(vx89AB), vmagic_bias_less_zero_point);
    const int32x4_t vaccCDEF = vqsubq_s32(vreinterpretq_s32_f32(vxCDEF), vmagic_bias_less_zero_point);
    const int32x4_t vaccGHIJ = vqsubq_s32(vreinterpretq_s32_f32(vxGHIJ), vmagic_bias_less_zero_point);
    const int32x4_t vaccKLMN = vqsubq_s32(vreinterpretq_s32_f32(vxKLMN), vmagic_bias_less_zero_point);

    // Narrow 32 -> 16 -> 8 bits with saturation, then clamp to [min, max].
    const int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
    const int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
    const int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));

    uint8x16_t vy0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    uint8x8_t vyGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);

    vy0123456789ABCDEF = vmaxq_u8(vy0123456789ABCDEF, voutput_min);
    vyGHIJKLMN = vmax_u8(vyGHIJKLMN, vget_low_u8(voutput_min));

    vy0123456789ABCDEF = vminq_u8(vy0123456789ABCDEF, voutput_max);
    vyGHIJKLMN = vmin_u8(vyGHIJKLMN, vget_low_u8(voutput_max));

    vst1q_u8(output, vy0123456789ABCDEF); output += 16;
    vst1_u8(output, vyGHIJKLMN); output += 8;
  }
  // Secondary loop: 8 floats -> 8 uint8 per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    float32x4_t vx_lo = vld1q_f32(input); input += 4;
    float32x4_t vx_hi = vld1q_f32(input); input += 4;

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    vx_lo = vaddq_f32(vx_lo, vmagic_bias);
    vx_hi = vaddq_f32(vx_hi, vmagic_bias);

    const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
    const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);

    const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, vget_low_u8(voutput_min));
    vy = vmin_u8(vy, vget_low_u8(voutput_max));
    vst1_u8(output, vy); output += 8;
  }
  // Tail: 1-7 remaining elements; over-reads inputs, writes exactly `batch` outputs.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    float32x4_t vx_lo = vld1q_f32(input);
    // When >= 4 elements remain, the high half starts 4 floats further in;
    // otherwise it aliases the low half (its lanes are discarded below).
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    float32x4_t vx_hi = vld1q_f32(x_hi);

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    vx_lo = vaddq_f32(vx_lo, vmagic_bias);
    vx_hi = vaddq_f32(vx_hi, vmagic_bias);

    const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
    const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);

    const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, vget_low_u8(voutput_min));
    vy = vmin_u8(vy, vget_low_u8(voutput_max));

    // Store 4, then 2, then 1 bytes as needed, rotating consumed lanes out.
    if (batch & (4 * sizeof(float))) {
      vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
      vy = vext_u8(vy, vy, 4);
    }
    if (batch & (2 * sizeof(float))) {
      vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
      vy = vext_u8(vy, vy, 2);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_u8(output, vy, 0);
    }
  }
}
| 5,528 | 39.955556 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-neon-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts `batch` bytes worth of float inputs to quantized uint8 (QU8)
// outputs with NEON intrinsics, 32 elements per main-loop iteration.
//
// Rounding uses the "magic bias" trick: after multiplying by the scale and
// adding a large bias, the rounded integer is recovered directly from the
// float's bit pattern (vreinterpretq + saturating subtract of
// magic_bias_less_zero_point, which also folds in the output zero point).
// The partial-batch tail intentionally loads full 4-float vectors past the
// valid data; this is sanctioned by the XNN_OOB_READS attribute.
void xnn_f32_qu8_vcvt_ukernel__neon_x32(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Broadcast per-tensor quantization parameters into vector registers.
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon.magic_bias);
  const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon.magic_bias_less_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);

  // Main loop: 32 floats -> 32 uint8 per iteration.
  for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
    float32x4_t vx0123 = vld1q_f32(input); input += 4;
    float32x4_t vx4567 = vld1q_f32(input); input += 4;
    float32x4_t vx89AB = vld1q_f32(input); input += 4;
    float32x4_t vxCDEF = vld1q_f32(input); input += 4;
    float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
    float32x4_t vxKLMN = vld1q_f32(input); input += 4;
    float32x4_t vxOPQR = vld1q_f32(input); input += 4;
    float32x4_t vxSTUV = vld1q_f32(input); input += 4;

    vx0123 = vmulq_f32(vx0123, vscale);
    vx4567 = vmulq_f32(vx4567, vscale);
    vx89AB = vmulq_f32(vx89AB, vscale);
    vxCDEF = vmulq_f32(vxCDEF, vscale);
    vxGHIJ = vmulq_f32(vxGHIJ, vscale);
    vxKLMN = vmulq_f32(vxKLMN, vscale);
    vxOPQR = vmulq_f32(vxOPQR, vscale);
    vxSTUV = vmulq_f32(vxSTUV, vscale);

    vx0123 = vaddq_f32(vx0123, vmagic_bias);
    vx4567 = vaddq_f32(vx4567, vmagic_bias);
    vx89AB = vaddq_f32(vx89AB, vmagic_bias);
    vxCDEF = vaddq_f32(vxCDEF, vmagic_bias);
    vxGHIJ = vaddq_f32(vxGHIJ, vmagic_bias);
    vxKLMN = vaddq_f32(vxKLMN, vmagic_bias);
    vxOPQR = vaddq_f32(vxOPQR, vmagic_bias);
    vxSTUV = vaddq_f32(vxSTUV, vmagic_bias);

    // Recover rounded integers from the biased floats' bit patterns.
    const int32x4_t vacc0123 = vqsubq_s32(vreinterpretq_s32_f32(vx0123), vmagic_bias_less_zero_point);
    const int32x4_t vacc4567 = vqsubq_s32(vreinterpretq_s32_f32(vx4567), vmagic_bias_less_zero_point);
    const int32x4_t vacc89AB = vqsubq_s32(vreinterpretq_s32_f32(vx89AB), vmagic_bias_less_zero_point);
    const int32x4_t vaccCDEF = vqsubq_s32(vreinterpretq_s32_f32(vxCDEF), vmagic_bias_less_zero_point);
    const int32x4_t vaccGHIJ = vqsubq_s32(vreinterpretq_s32_f32(vxGHIJ), vmagic_bias_less_zero_point);
    const int32x4_t vaccKLMN = vqsubq_s32(vreinterpretq_s32_f32(vxKLMN), vmagic_bias_less_zero_point);
    const int32x4_t vaccOPQR = vqsubq_s32(vreinterpretq_s32_f32(vxOPQR), vmagic_bias_less_zero_point);
    const int32x4_t vaccSTUV = vqsubq_s32(vreinterpretq_s32_f32(vxSTUV), vmagic_bias_less_zero_point);

    // Narrow 32 -> 16 -> 8 bits with saturation, then clamp to [min, max].
    const int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
    const int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
    const int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
    const int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));

    uint8x16_t vy0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    uint8x16_t vyGHIJKLMNOPQRSTUV = vcombine_u8(vqmovun_s16(vaccGHIJKLMN), vqmovun_s16(vaccOPQRSTUV));

    vy0123456789ABCDEF = vmaxq_u8(vy0123456789ABCDEF, voutput_min);
    vyGHIJKLMNOPQRSTUV = vmaxq_u8(vyGHIJKLMNOPQRSTUV, voutput_min);

    vy0123456789ABCDEF = vminq_u8(vy0123456789ABCDEF, voutput_max);
    vyGHIJKLMNOPQRSTUV = vminq_u8(vyGHIJKLMNOPQRSTUV, voutput_max);

    vst1q_u8(output, vy0123456789ABCDEF); output += 16;
    vst1q_u8(output, vyGHIJKLMNOPQRSTUV); output += 16;
  }
  // Secondary loop: 8 floats -> 8 uint8 per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    float32x4_t vx_lo = vld1q_f32(input); input += 4;
    float32x4_t vx_hi = vld1q_f32(input); input += 4;

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    vx_lo = vaddq_f32(vx_lo, vmagic_bias);
    vx_hi = vaddq_f32(vx_hi, vmagic_bias);

    const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
    const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);

    const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, vget_low_u8(voutput_min));
    vy = vmin_u8(vy, vget_low_u8(voutput_max));
    vst1_u8(output, vy); output += 8;
  }
  // Tail: 1-7 remaining elements; over-reads inputs, writes exactly `batch` outputs.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    float32x4_t vx_lo = vld1q_f32(input);
    // When >= 4 elements remain, the high half starts 4 floats further in;
    // otherwise it aliases the low half (its lanes are discarded below).
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    float32x4_t vx_hi = vld1q_f32(x_hi);

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    vx_lo = vaddq_f32(vx_lo, vmagic_bias);
    vx_hi = vaddq_f32(vx_hi, vmagic_bias);

    const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
    const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);

    const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, vget_low_u8(voutput_min));
    vy = vmin_u8(vy, vget_low_u8(voutput_max));

    // Store 4, then 2, then 1 bytes as needed, rotating consumed lanes out.
    if (batch & (4 * sizeof(float))) {
      vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
      vy = vext_u8(vy, vy, 4);
    }
    if (batch & (2 * sizeof(float))) {
      vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
      vy = vext_u8(vy, vy, 2);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_u8(output, vy, 0);
    }
  }
}
| 6,174 | 41.881944 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts `batch` bytes worth of float inputs to quantized uint8 (QU8)
// outputs with NEON intrinsics, 8 elements per main-loop iteration.
//
// Rounding uses the "magic bias" trick: after multiplying by the scale and
// adding a large bias, the rounded integer is recovered directly from the
// float's bit pattern (vreinterpretq + saturating subtract of
// magic_bias_less_zero_point, which also folds in the output zero point).
// The partial-batch tail intentionally loads full 4-float vectors past the
// valid data; this is sanctioned by the XNN_OOB_READS attribute.
void xnn_f32_qu8_vcvt_ukernel__neon_x8(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Broadcast per-tensor quantization parameters into vector registers.
  // This variant only ever clamps 8 lanes at a time, so 64-bit min/max
  // vectors suffice (unlike the wider x16/x24/x32 variants).
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon.magic_bias);
  const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon.magic_bias_less_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->neon.output_max);

  // Main loop: 8 floats -> 8 uint8 per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    float32x4_t vx_lo = vld1q_f32(input); input += 4;
    float32x4_t vx_hi = vld1q_f32(input); input += 4;

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    vx_lo = vaddq_f32(vx_lo, vmagic_bias);
    vx_hi = vaddq_f32(vx_hi, vmagic_bias);

    // Recover rounded integers from the biased floats' bit patterns.
    const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
    const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);

    // Narrow 32 -> 16 -> 8 bits with saturation, then clamp to [min, max].
    const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, voutput_min);
    vy = vmin_u8(vy, voutput_max);
    vst1_u8(output, vy); output += 8;
  }
  // Tail: 1-7 remaining elements; over-reads inputs, writes exactly `batch` outputs.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    float32x4_t vx_lo = vld1q_f32(input);
    // When >= 4 elements remain, the high half starts 4 floats further in;
    // otherwise it aliases the low half (its lanes are discarded below).
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    float32x4_t vx_hi = vld1q_f32(x_hi);

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    vx_lo = vaddq_f32(vx_lo, vmagic_bias);
    vx_hi = vaddq_f32(vx_hi, vmagic_bias);

    const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
    const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);

    const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, voutput_min);
    vy = vmin_u8(vy, voutput_max);

    // Store 4, then 2, then 1 bytes as needed, rotating consumed lanes out.
    if (batch & (4 * sizeof(float))) {
      vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
      vy = vext_u8(vy, vy, 4);
    }
    if (batch & (2 * sizeof(float))) {
      vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
      vy = vext_u8(vy, vy, 2);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_u8(output, vy, 0);
    }
  }
}
| 3,130 | 33.788889 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-neonv8-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts `batch` bytes worth of float inputs to quantized uint8 (QU8)
// outputs using ARMv8 NEON, 16 elements per main-loop iteration.
//
// Unlike the baseline NEON variant, ARMv8 provides vcvtnq_s32_f32
// (round-to-nearest-even float->int conversion), so no magic-bias trick is
// needed; the output zero point is added afterwards with a saturating s16 add.
// The partial-batch tail intentionally loads full 4-float vectors past the
// valid data; this is sanctioned by the XNN_OOB_READS attribute.
void xnn_f32_qu8_vcvt_ukernel__neonv8_x16(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Broadcast per-tensor quantization parameters into vector registers.
  const float32x4_t vscale = vld1q_dup_f32(&params->neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neonv8.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neonv8.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neonv8.output_max);

  // Main loop: 16 floats -> 16 uint8 per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    float32x4_t vx0123 = vld1q_f32(input); input += 4;
    float32x4_t vx4567 = vld1q_f32(input); input += 4;
    float32x4_t vx89AB = vld1q_f32(input); input += 4;
    float32x4_t vxCDEF = vld1q_f32(input); input += 4;

    vx0123 = vmulq_f32(vx0123, vscale);
    vx4567 = vmulq_f32(vx4567, vscale);
    vx89AB = vmulq_f32(vx89AB, vscale);
    vxCDEF = vmulq_f32(vxCDEF, vscale);

    // Round-to-nearest-even conversion to 32-bit integers.
    const int32x4_t vacc0123 = vcvtnq_s32_f32(vx0123);
    const int32x4_t vacc4567 = vcvtnq_s32_f32(vx4567);
    const int32x4_t vacc89AB = vcvtnq_s32_f32(vx89AB);
    const int32x4_t vaccCDEF = vcvtnq_s32_f32(vxCDEF);

    // Narrow to 16 bits, add zero point (saturating), narrow to u8, clamp.
    int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
    int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));

    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);

    uint8x16_t vy0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    vy0123456789ABCDEF = vmaxq_u8(vy0123456789ABCDEF, voutput_min);
    vy0123456789ABCDEF = vminq_u8(vy0123456789ABCDEF, voutput_max);
    vst1q_u8(output, vy0123456789ABCDEF); output += 16;
  }
  // Secondary loop: 8 floats -> 8 uint8 per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    float32x4_t vx_lo = vld1q_f32(input); input += 4;
    float32x4_t vx_hi = vld1q_f32(input); input += 4;

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
    const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);

    int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
    vacc = vqaddq_s16(vacc, voutput_zero_point);

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, vget_low_u8(voutput_min));
    vy = vmin_u8(vy, vget_low_u8(voutput_max));
    vst1_u8(output, vy); output += 8;
  }
  // Tail: 1-7 remaining elements; over-reads inputs, writes exactly `batch` outputs.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    float32x4_t vx_lo = vld1q_f32(input);
    // When >= 4 elements remain, the high half starts 4 floats further in;
    // otherwise it aliases the low half (its lanes are discarded below).
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    float32x4_t vx_hi = vld1q_f32(x_hi);

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
    const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);

    int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
    vacc = vqaddq_s16(vacc, voutput_zero_point);

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, vget_low_u8(voutput_min));
    vy = vmin_u8(vy, vget_low_u8(voutput_max));

    // Store 4, then 2, then 1 bytes as needed, rotating consumed lanes out.
    if (batch & (4 * sizeof(float))) {
      vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
      vy = vext_u8(vy, vy, 4);
    }
    if (batch & (2 * sizeof(float))) {
      vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
      vy = vext_u8(vy, vy, 2);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_u8(output, vy, 0);
    }
  }
}
| 4,106 | 34.713043 | 102 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-neonv8-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts `batch` bytes worth of float inputs to quantized uint8 (QU8)
// outputs using ARMv8 NEON, 24 elements per main-loop iteration.
//
// Unlike the baseline NEON variant, ARMv8 provides vcvtnq_s32_f32
// (round-to-nearest-even float->int conversion), so no magic-bias trick is
// needed; the output zero point is added afterwards with a saturating s16 add.
// The partial-batch tail intentionally loads full 4-float vectors past the
// valid data; this is sanctioned by the XNN_OOB_READS attribute.
void xnn_f32_qu8_vcvt_ukernel__neonv8_x24(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Broadcast per-tensor quantization parameters into vector registers.
  const float32x4_t vscale = vld1q_dup_f32(&params->neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neonv8.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neonv8.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neonv8.output_max);

  // Main loop: 24 floats -> 24 uint8 per iteration.
  for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
    float32x4_t vx0123 = vld1q_f32(input); input += 4;
    float32x4_t vx4567 = vld1q_f32(input); input += 4;
    float32x4_t vx89AB = vld1q_f32(input); input += 4;
    float32x4_t vxCDEF = vld1q_f32(input); input += 4;
    float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
    float32x4_t vxKLMN = vld1q_f32(input); input += 4;

    vx0123 = vmulq_f32(vx0123, vscale);
    vx4567 = vmulq_f32(vx4567, vscale);
    vx89AB = vmulq_f32(vx89AB, vscale);
    vxCDEF = vmulq_f32(vxCDEF, vscale);
    vxGHIJ = vmulq_f32(vxGHIJ, vscale);
    vxKLMN = vmulq_f32(vxKLMN, vscale);

    // Round-to-nearest-even conversion to 32-bit integers.
    const int32x4_t vacc0123 = vcvtnq_s32_f32(vx0123);
    const int32x4_t vacc4567 = vcvtnq_s32_f32(vx4567);
    const int32x4_t vacc89AB = vcvtnq_s32_f32(vx89AB);
    const int32x4_t vaccCDEF = vcvtnq_s32_f32(vxCDEF);
    const int32x4_t vaccGHIJ = vcvtnq_s32_f32(vxGHIJ);
    const int32x4_t vaccKLMN = vcvtnq_s32_f32(vxKLMN);

    // Narrow to 16 bits, add zero point (saturating), narrow to u8, clamp.
    int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
    int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
    int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));

    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
    vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);

    uint8x16_t vy0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    uint8x8_t vyGHIJKLMN = vqmovun_s16(vaccGHIJKLMN);

    vy0123456789ABCDEF = vmaxq_u8(vy0123456789ABCDEF, voutput_min);
    vyGHIJKLMN = vmax_u8(vyGHIJKLMN, vget_low_u8(voutput_min));

    vy0123456789ABCDEF = vminq_u8(vy0123456789ABCDEF, voutput_max);
    vyGHIJKLMN = vmin_u8(vyGHIJKLMN, vget_low_u8(voutput_max));

    vst1q_u8(output, vy0123456789ABCDEF); output += 16;
    vst1_u8(output, vyGHIJKLMN); output += 8;
  }
  // Secondary loop: 8 floats -> 8 uint8 per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    float32x4_t vx_lo = vld1q_f32(input); input += 4;
    float32x4_t vx_hi = vld1q_f32(input); input += 4;

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
    const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);

    int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
    vacc = vqaddq_s16(vacc, voutput_zero_point);

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, vget_low_u8(voutput_min));
    vy = vmin_u8(vy, vget_low_u8(voutput_max));
    vst1_u8(output, vy); output += 8;
  }
  // Tail: 1-7 remaining elements; over-reads inputs, writes exactly `batch` outputs.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    float32x4_t vx_lo = vld1q_f32(input);
    // When >= 4 elements remain, the high half starts 4 floats further in;
    // otherwise it aliases the low half (its lanes are discarded below).
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    float32x4_t vx_hi = vld1q_f32(x_hi);

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
    const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);

    int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
    vacc = vqaddq_s16(vacc, voutput_zero_point);

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, vget_low_u8(voutput_min));
    vy = vmin_u8(vy, vget_low_u8(voutput_max));

    // Store 4, then 2, then 1 bytes as needed, rotating consumed lanes out.
    if (batch & (4 * sizeof(float))) {
      vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
      vy = vext_u8(vy, vy, 4);
    }
    if (batch & (2 * sizeof(float))) {
      vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
      vy = vext_u8(vy, vy, 2);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_u8(output, vy, 0);
    }
  }
}
| 4,786 | 36.692913 | 102 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-neonv8-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts `batch` bytes worth of float inputs to quantized uint8 (QU8)
// outputs using ARMv8 NEON, 32 elements per main-loop iteration.
//
// Unlike the baseline NEON variant, ARMv8 provides vcvtnq_s32_f32
// (round-to-nearest-even float->int conversion), so no magic-bias trick is
// needed; the output zero point is added afterwards with a saturating s16 add.
// The partial-batch tail intentionally loads full 4-float vectors past the
// valid data; this is sanctioned by the XNN_OOB_READS attribute.
void xnn_f32_qu8_vcvt_ukernel__neonv8_x32(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Broadcast per-tensor quantization parameters into vector registers.
  const float32x4_t vscale = vld1q_dup_f32(&params->neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neonv8.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neonv8.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neonv8.output_max);

  // Main loop: 32 floats -> 32 uint8 per iteration.
  for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
    float32x4_t vx0123 = vld1q_f32(input); input += 4;
    float32x4_t vx4567 = vld1q_f32(input); input += 4;
    float32x4_t vx89AB = vld1q_f32(input); input += 4;
    float32x4_t vxCDEF = vld1q_f32(input); input += 4;
    float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
    float32x4_t vxKLMN = vld1q_f32(input); input += 4;
    float32x4_t vxOPQR = vld1q_f32(input); input += 4;
    float32x4_t vxSTUV = vld1q_f32(input); input += 4;

    vx0123 = vmulq_f32(vx0123, vscale);
    vx4567 = vmulq_f32(vx4567, vscale);
    vx89AB = vmulq_f32(vx89AB, vscale);
    vxCDEF = vmulq_f32(vxCDEF, vscale);
    vxGHIJ = vmulq_f32(vxGHIJ, vscale);
    vxKLMN = vmulq_f32(vxKLMN, vscale);
    vxOPQR = vmulq_f32(vxOPQR, vscale);
    vxSTUV = vmulq_f32(vxSTUV, vscale);

    // Round-to-nearest-even conversion to 32-bit integers.
    const int32x4_t vacc0123 = vcvtnq_s32_f32(vx0123);
    const int32x4_t vacc4567 = vcvtnq_s32_f32(vx4567);
    const int32x4_t vacc89AB = vcvtnq_s32_f32(vx89AB);
    const int32x4_t vaccCDEF = vcvtnq_s32_f32(vxCDEF);
    const int32x4_t vaccGHIJ = vcvtnq_s32_f32(vxGHIJ);
    const int32x4_t vaccKLMN = vcvtnq_s32_f32(vxKLMN);
    const int32x4_t vaccOPQR = vcvtnq_s32_f32(vxOPQR);
    const int32x4_t vaccSTUV = vcvtnq_s32_f32(vxSTUV);

    // Narrow to 16 bits, add zero point (saturating), narrow to u8, clamp.
    int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
    int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
    int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
    int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));

    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
    vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
    vaccOPQRSTUV = vqaddq_s16(vaccOPQRSTUV, voutput_zero_point);

    uint8x16_t vy0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    uint8x16_t vyGHIJKLMNOPQRSTUV = vcombine_u8(vqmovun_s16(vaccGHIJKLMN), vqmovun_s16(vaccOPQRSTUV));

    vy0123456789ABCDEF = vmaxq_u8(vy0123456789ABCDEF, voutput_min);
    vyGHIJKLMNOPQRSTUV = vmaxq_u8(vyGHIJKLMNOPQRSTUV, voutput_min);

    vy0123456789ABCDEF = vminq_u8(vy0123456789ABCDEF, voutput_max);
    vyGHIJKLMNOPQRSTUV = vminq_u8(vyGHIJKLMNOPQRSTUV, voutput_max);

    vst1q_u8(output, vy0123456789ABCDEF); output += 16;
    vst1q_u8(output, vyGHIJKLMNOPQRSTUV); output += 16;
  }
  // Secondary loop: 8 floats -> 8 uint8 per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    float32x4_t vx_lo = vld1q_f32(input); input += 4;
    float32x4_t vx_hi = vld1q_f32(input); input += 4;

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
    const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);

    int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
    vacc = vqaddq_s16(vacc, voutput_zero_point);

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, vget_low_u8(voutput_min));
    vy = vmin_u8(vy, vget_low_u8(voutput_max));
    vst1_u8(output, vy); output += 8;
  }
  // Tail: 1-7 remaining elements; over-reads inputs, writes exactly `batch` outputs.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    float32x4_t vx_lo = vld1q_f32(input);
    // When >= 4 elements remain, the high half starts 4 floats further in;
    // otherwise it aliases the low half (its lanes are discarded below).
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    float32x4_t vx_hi = vld1q_f32(x_hi);

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
    const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);

    int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
    vacc = vqaddq_s16(vacc, voutput_zero_point);

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, vget_low_u8(voutput_min));
    vy = vmin_u8(vy, vget_low_u8(voutput_max));

    // Store 4, then 2, then 1 bytes as needed, rotating consumed lanes out.
    if (batch & (4 * sizeof(float))) {
      vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
      vy = vext_u8(vy, vy, 4);
    }
    if (batch & (2 * sizeof(float))) {
      vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
      vy = vext_u8(vy, vy, 2);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_u8(output, vy, 0);
    }
  }
}
| 5,305 | 38.303704 | 102 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-neonv8-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts a batch of FP32 values to QU8 with NEONv8, 8 elements per main-loop
// iteration: scale, round float->int32 (VCVTN, to nearest even), saturating
// add of the output zero point in s16, saturating narrow to u8, then clamp to
// [output_min, output_max].
void xnn_f32_qu8_vcvt_ukernel__neonv8_x8(
    size_t batch,                 // number of input BYTES; nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Broadcast the conversion constants into vector registers once.
  const float32x4_t vscale = vld1q_dup_f32(&params->neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neonv8.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->neonv8.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->neonv8.output_max);

  // Main loop: two 4-float vectors in, 8 u8 values out per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    float32x4_t vx_lo = vld1q_f32(input); input += 4;
    float32x4_t vx_hi = vld1q_f32(input); input += 4;

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    // ARMv8 round-to-nearest-even float->s32 conversion.
    const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
    const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);

    // Saturating narrow to s16, then saturating add of the zero point.
    int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
    vacc = vqaddq_s16(vacc, voutput_zero_point);

    // Saturating narrow to u8, then clamp to the requested output range.
    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, voutput_min);
    vy = vmin_u8(vy, voutput_max);
    vst1_u8(output, vy); output += 8;
  }
  // Tail: 1-7 leftover elements. The loads may read past the end of the input
  // buffer, which the XNN_OOB_READS annotation on this kernel permits.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    float32x4_t vx_lo = vld1q_f32(input);
    // When <= 4 elements remain, x_hi aliases input (same 4 floats reloaded);
    // otherwise it points at the next 4 floats.
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    float32x4_t vx_hi = vld1q_f32(x_hi);

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
    const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);

    int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
    vacc = vqaddq_s16(vacc, voutput_zero_point);

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, voutput_min);
    vy = vmin_u8(vy, voutput_max);

    // Store 4, then 2, then 1 lanes, shifting consumed lanes out in between.
    if (batch & (4 * sizeof(float))) {
      vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
      vy = vext_u8(vy, vy, 4);
    }
    if (batch & (2 * sizeof(float))) {
      vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
      vy = vext_u8(vy, vy, 2);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_u8(output, vy, 0);
    }
  }
}
| 2,769 | 31.588235 | 91 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-scalar-fmagic-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Scalar float -> QU8 conversion, one element per iteration ("fmagic"
// variant): scale and clamp in the float domain, add a magic bias so the
// rounded integer can be recovered from the float's bit pattern, then
// subtract (magic bias - zero point) to obtain the quantized value.
void xnn_f32_qu8_vcvt_ukernel__scalar_fmagic_x1(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float scale = params->scalar_fmagic.scale;
  const float lo_bound = params->scalar_fmagic.output_min_less_zero_point;
  const float hi_bound = params->scalar_fmagic.output_max_less_zero_point;
  const float magic_bias = params->scalar_fmagic.magic_bias;
  const int32_t magic_bias_less_zero_point = params->scalar_fmagic.magic_bias_less_zero_point;

  // batch counts bytes and is a nonzero multiple of sizeof(float).
  for (size_t n = batch / sizeof(float); n != 0; n--) {
    const float scaled = *input++ * scale;
    // Clamp in the float domain so the magic-bias trick cannot overflow.
    const float clamped = math_min_f32(math_max_f32(scaled, lo_bound), hi_bound);
    const int32_t quantized =
        (int32_t) float_as_uint32(clamped + magic_bias) - magic_bias_less_zero_point;
    *output++ = (uint8_t) quantized;
  }
}
| 1,448 | 28.571429 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-scalar-fmagic-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Converts a batch of FP32 values to QU8, 2 elements per main-loop iteration
// ("fmagic" variant): scale and clamp in the float domain, add a magic bias
// so the rounded integer can be read from the float's bit pattern, then
// subtract (magic bias - zero point).
void xnn_f32_qu8_vcvt_ukernel__scalar_fmagic_x2(
    size_t batch,                 // number of input BYTES; nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float vscale = params->scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_zero_point = params->scalar_fmagic.magic_bias_less_zero_point;

  // Main loop, unrolled by 2.
  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    float vx0 = input[0];
    float vx1 = input[1];
    input += 2;

    vx0 *= vscale;
    vx1 *= vscale;

    // Clamp in the float domain before applying the magic bias.
    vx0 = math_max_f32(vx0, voutput_min_less_zero_point);
    vx1 = math_max_f32(vx1, voutput_min_less_zero_point);

    vx0 = math_min_f32(vx0, voutput_max_less_zero_point);
    vx1 = math_min_f32(vx1, voutput_max_less_zero_point);

    vx0 += vmagic_bias;
    vx1 += vmagic_bias;

    // Reinterpret the biased float's bits as an integer to extract the
    // rounded value, then remove the bias (net effect adds the zero point).
    int32_t vy0 = (int32_t) float_as_uint32(vx0);
    int32_t vy1 = (int32_t) float_as_uint32(vx1);

    vy0 -= vmagic_bias_less_zero_point;
    vy1 -= vmagic_bias_less_zero_point;

    output[0] = (uint8_t) vy0;
    output[1] = (uint8_t) vy1;
    output += 2;
  }
  // Tail: at most 1 leftover element.
  if XNN_UNLIKELY(batch != 0) {
    float vx = *input;
    vx *= vscale;
    vx = math_max_f32(vx, voutput_min_less_zero_point);
    vx = math_min_f32(vx, voutput_max_less_zero_point);
    vx += vmagic_bias;
    int32_t vy = (int32_t) float_as_uint32(vx);
    vy -= vmagic_bias_less_zero_point;
    *output = (uint8_t) vy;
  }
}
| 2,143 | 27.972973 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-scalar-fmagic-x3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Converts a batch of FP32 values to QU8, 3 elements per main-loop iteration
// ("fmagic" variant): scale and clamp in the float domain, add a magic bias
// so the rounded integer can be read from the float's bit pattern, then
// subtract (magic bias - zero point).
void xnn_f32_qu8_vcvt_ukernel__scalar_fmagic_x3(
    size_t batch,                 // number of input BYTES; nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float vscale = params->scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_zero_point = params->scalar_fmagic.magic_bias_less_zero_point;

  // Main loop, unrolled by 3.
  for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
    float vx0 = input[0];
    float vx1 = input[1];
    float vx2 = input[2];
    input += 3;

    vx0 *= vscale;
    vx1 *= vscale;
    vx2 *= vscale;

    // Clamp in the float domain before applying the magic bias.
    vx0 = math_max_f32(vx0, voutput_min_less_zero_point);
    vx1 = math_max_f32(vx1, voutput_min_less_zero_point);
    vx2 = math_max_f32(vx2, voutput_min_less_zero_point);

    vx0 = math_min_f32(vx0, voutput_max_less_zero_point);
    vx1 = math_min_f32(vx1, voutput_max_less_zero_point);
    vx2 = math_min_f32(vx2, voutput_max_less_zero_point);

    vx0 += vmagic_bias;
    vx1 += vmagic_bias;
    vx2 += vmagic_bias;

    // Bit-cast the biased float to extract the rounded integer value.
    int32_t vy0 = (int32_t) float_as_uint32(vx0);
    int32_t vy1 = (int32_t) float_as_uint32(vx1);
    int32_t vy2 = (int32_t) float_as_uint32(vx2);

    vy0 -= vmagic_bias_less_zero_point;
    vy1 -= vmagic_bias_less_zero_point;
    vy2 -= vmagic_bias_less_zero_point;

    output[0] = (uint8_t) vy0;
    output[1] = (uint8_t) vy1;
    output[2] = (uint8_t) vy2;
    output += 3;
  }
  // Tail: 1-2 leftover elements, processed one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      float vx = *input++;
      vx *= vscale;
      vx = math_max_f32(vx, voutput_min_less_zero_point);
      vx = math_min_f32(vx, voutput_max_less_zero_point);
      vx += vmagic_bias;
      int32_t vy = (int32_t) float_as_uint32(vx);
      vy -= vmagic_bias_less_zero_point;
      *output++ = (uint8_t) vy;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,535 | 28.488372 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-scalar-fmagic-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Converts a batch of FP32 values to QU8, 4 elements per main-loop iteration
// ("fmagic" variant): scale and clamp in the float domain, add a magic bias
// so the rounded integer can be read from the float's bit pattern, then
// subtract (magic bias - zero point).
void xnn_f32_qu8_vcvt_ukernel__scalar_fmagic_x4(
    size_t batch,                 // number of input BYTES; nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float vscale = params->scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_zero_point = params->scalar_fmagic.magic_bias_less_zero_point;

  // Main loop, unrolled by 4.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    float vx0 = input[0];
    float vx1 = input[1];
    float vx2 = input[2];
    float vx3 = input[3];
    input += 4;

    vx0 *= vscale;
    vx1 *= vscale;
    vx2 *= vscale;
    vx3 *= vscale;

    // Clamp in the float domain before applying the magic bias.
    vx0 = math_max_f32(vx0, voutput_min_less_zero_point);
    vx1 = math_max_f32(vx1, voutput_min_less_zero_point);
    vx2 = math_max_f32(vx2, voutput_min_less_zero_point);
    vx3 = math_max_f32(vx3, voutput_min_less_zero_point);

    vx0 = math_min_f32(vx0, voutput_max_less_zero_point);
    vx1 = math_min_f32(vx1, voutput_max_less_zero_point);
    vx2 = math_min_f32(vx2, voutput_max_less_zero_point);
    vx3 = math_min_f32(vx3, voutput_max_less_zero_point);

    vx0 += vmagic_bias;
    vx1 += vmagic_bias;
    vx2 += vmagic_bias;
    vx3 += vmagic_bias;

    // Bit-cast the biased float to extract the rounded integer value.
    int32_t vy0 = (int32_t) float_as_uint32(vx0);
    int32_t vy1 = (int32_t) float_as_uint32(vx1);
    int32_t vy2 = (int32_t) float_as_uint32(vx2);
    int32_t vy3 = (int32_t) float_as_uint32(vx3);

    vy0 -= vmagic_bias_less_zero_point;
    vy1 -= vmagic_bias_less_zero_point;
    vy2 -= vmagic_bias_less_zero_point;
    vy3 -= vmagic_bias_less_zero_point;

    output[0] = (uint8_t) vy0;
    output[1] = (uint8_t) vy1;
    output[2] = (uint8_t) vy2;
    output[3] = (uint8_t) vy3;
    output += 4;
  }
  // Tail: 1-3 leftover elements, processed one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      float vx = *input++;
      vx *= vscale;
      vx = math_max_f32(vx, voutput_min_less_zero_point);
      vx = math_min_f32(vx, voutput_max_less_zero_point);
      vx += vmagic_bias;
      int32_t vy = (int32_t) float_as_uint32(vx);
      vy -= vmagic_bias_less_zero_point;
      *output++ = (uint8_t) vy;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,841 | 29.234043 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-scalar-imagic-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-imagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Scalar float -> QU8 conversion, one element per iteration ("imagic"
// variant): scale, add a magic bias, bit-cast the float to an integer, clamp
// in the INTEGER domain, then subtract (magic bias - zero point).
void xnn_f32_qu8_vcvt_ukernel__scalar_imagic_x1(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float scale = params->scalar_imagic.scale;
  const float magic_bias = params->scalar_imagic.magic_bias;
  const int32_t magic_min = params->scalar_imagic.magic_min;
  const int32_t magic_max = params->scalar_imagic.magic_max;
  const int32_t magic_bias_less_zero_point = params->scalar_imagic.magic_bias_less_zero_point;

  // batch counts bytes and is a nonzero multiple of sizeof(float).
  for (size_t n = batch / sizeof(float); n != 0; n--) {
    float biased = *input++ * scale;
    biased += magic_bias;
    // The bit pattern of the biased float encodes the rounded integer;
    // clamping happens on that integer encoding.
    int32_t quantized = (int32_t) float_as_uint32(biased);
    if (quantized < magic_min) {
      quantized = magic_min;
    }
    if (quantized > magic_max) {
      quantized = magic_max;
    }
    *output++ = (uint8_t) (quantized - magic_bias_less_zero_point);
  }
}
| 1,350 | 26.571429 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-scalar-imagic-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-imagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Converts a batch of FP32 values to QU8, 2 elements per main-loop iteration
// ("imagic" variant): scale, add a magic bias, bit-cast to integer, clamp in
// the INTEGER domain, then subtract (magic bias - zero point).
void xnn_f32_qu8_vcvt_ukernel__scalar_imagic_x2(
    size_t batch,                 // number of input BYTES; nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float vscale = params->scalar_imagic.scale;
  const float vmagic_bias = params->scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->scalar_imagic.magic_min;
  const int32_t vmagic_max = params->scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->scalar_imagic.magic_bias_less_zero_point;

  // Main loop, unrolled by 2.
  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    float vx0 = input[0];
    float vx1 = input[1];
    input += 2;

    vx0 *= vscale;
    vx1 *= vscale;

    vx0 += vmagic_bias;
    vx1 += vmagic_bias;

    // Bit-cast the biased floats; the rounded values live in the bit pattern.
    int32_t vy0 = (int32_t) float_as_uint32(vx0);
    int32_t vy1 = (int32_t) float_as_uint32(vx1);

    // Clamp in the integer domain (bounds pre-biased by magic_bias).
    vy0 = math_max_s32(vy0, vmagic_min);
    vy1 = math_max_s32(vy1, vmagic_min);

    vy0 = math_min_s32(vy0, vmagic_max);
    vy1 = math_min_s32(vy1, vmagic_max);

    vy0 -= vmagic_bias_less_zero_point;
    vy1 -= vmagic_bias_less_zero_point;

    output[0] = (uint8_t) vy0;
    output[1] = (uint8_t) vy1;
    output += 2;
  }
  // Tail: at most 1 leftover element.
  if XNN_UNLIKELY(batch != 0) {
    float vx = *input;
    vx *= vscale;
    vx += vmagic_bias;
    int32_t vy = (int32_t) float_as_uint32(vx);
    vy = math_max_s32(vy, vmagic_min);
    vy = math_min_s32(vy, vmagic_max);
    vy -= vmagic_bias_less_zero_point;
    *output = (uint8_t) vy;
  }
}
| 1,977 | 25.72973 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-scalar-imagic-x3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-imagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Converts a batch of FP32 values to QU8, 3 elements per main-loop iteration
// ("imagic" variant): scale, add a magic bias, bit-cast to integer, clamp in
// the INTEGER domain, then subtract (magic bias - zero point).
void xnn_f32_qu8_vcvt_ukernel__scalar_imagic_x3(
    size_t batch,                 // number of input BYTES; nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float vscale = params->scalar_imagic.scale;
  const float vmagic_bias = params->scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->scalar_imagic.magic_min;
  const int32_t vmagic_max = params->scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->scalar_imagic.magic_bias_less_zero_point;

  // Main loop, unrolled by 3.
  for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
    float vx0 = input[0];
    float vx1 = input[1];
    float vx2 = input[2];
    input += 3;

    vx0 *= vscale;
    vx1 *= vscale;
    vx2 *= vscale;

    vx0 += vmagic_bias;
    vx1 += vmagic_bias;
    vx2 += vmagic_bias;

    // Bit-cast the biased floats; the rounded values live in the bit pattern.
    int32_t vy0 = (int32_t) float_as_uint32(vx0);
    int32_t vy1 = (int32_t) float_as_uint32(vx1);
    int32_t vy2 = (int32_t) float_as_uint32(vx2);

    // Clamp in the integer domain (bounds pre-biased by magic_bias).
    vy0 = math_max_s32(vy0, vmagic_min);
    vy1 = math_max_s32(vy1, vmagic_min);
    vy2 = math_max_s32(vy2, vmagic_min);

    vy0 = math_min_s32(vy0, vmagic_max);
    vy1 = math_min_s32(vy1, vmagic_max);
    vy2 = math_min_s32(vy2, vmagic_max);

    vy0 -= vmagic_bias_less_zero_point;
    vy1 -= vmagic_bias_less_zero_point;
    vy2 -= vmagic_bias_less_zero_point;

    output[0] = (uint8_t) vy0;
    output[1] = (uint8_t) vy1;
    output[2] = (uint8_t) vy2;
    output += 3;
  }
  // Tail: 1-2 leftover elements, processed one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      float vx = *input++;
      vx *= vscale;
      vx += vmagic_bias;
      int32_t vy = (int32_t) float_as_uint32(vx);
      vy = math_max_s32(vy, vmagic_min);
      vy = math_min_s32(vy, vmagic_max);
      vy -= vmagic_bias_less_zero_point;
      *output++ = (uint8_t) vy;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,335 | 26.162791 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-scalar-imagic-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-imagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Converts a batch of FP32 values to QU8, 4 elements per main-loop iteration
// ("imagic" variant): scale, add a magic bias, bit-cast to integer, clamp in
// the INTEGER domain, then subtract (magic bias - zero point).
void xnn_f32_qu8_vcvt_ukernel__scalar_imagic_x4(
    size_t batch,                 // number of input BYTES; nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float vscale = params->scalar_imagic.scale;
  const float vmagic_bias = params->scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->scalar_imagic.magic_min;
  const int32_t vmagic_max = params->scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->scalar_imagic.magic_bias_less_zero_point;

  // Main loop, unrolled by 4.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    float vx0 = input[0];
    float vx1 = input[1];
    float vx2 = input[2];
    float vx3 = input[3];
    input += 4;

    vx0 *= vscale;
    vx1 *= vscale;
    vx2 *= vscale;
    vx3 *= vscale;

    vx0 += vmagic_bias;
    vx1 += vmagic_bias;
    vx2 += vmagic_bias;
    vx3 += vmagic_bias;

    // Bit-cast the biased floats; the rounded values live in the bit pattern.
    int32_t vy0 = (int32_t) float_as_uint32(vx0);
    int32_t vy1 = (int32_t) float_as_uint32(vx1);
    int32_t vy2 = (int32_t) float_as_uint32(vx2);
    int32_t vy3 = (int32_t) float_as_uint32(vx3);

    // Clamp in the integer domain (bounds pre-biased by magic_bias).
    vy0 = math_max_s32(vy0, vmagic_min);
    vy1 = math_max_s32(vy1, vmagic_min);
    vy2 = math_max_s32(vy2, vmagic_min);
    vy3 = math_max_s32(vy3, vmagic_min);

    vy0 = math_min_s32(vy0, vmagic_max);
    vy1 = math_min_s32(vy1, vmagic_max);
    vy2 = math_min_s32(vy2, vmagic_max);
    vy3 = math_min_s32(vy3, vmagic_max);

    vy0 -= vmagic_bias_less_zero_point;
    vy1 -= vmagic_bias_less_zero_point;
    vy2 -= vmagic_bias_less_zero_point;
    vy3 -= vmagic_bias_less_zero_point;

    output[0] = (uint8_t) vy0;
    output[1] = (uint8_t) vy1;
    output[2] = (uint8_t) vy2;
    output[3] = (uint8_t) vy3;
    output += 4;
  }
  // Tail: 1-3 leftover elements, processed one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      float vx = *input++;
      vx *= vscale;
      vx += vmagic_bias;
      int32_t vy = (int32_t) float_as_uint32(vx);
      vy = math_max_s32(vy, vmagic_min);
      vy = math_min_s32(vy, vmagic_max);
      vy -= vmagic_bias_less_zero_point;
      *output++ = (uint8_t) vy;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,607 | 26.744681 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-scalar-lrintf-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-lrintf.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Scalar float -> QU8 conversion, one element per iteration ("lrintf"
// variant): scale, clamp in the float domain, round with lrintf, then add
// the output zero point.
void xnn_f32_qu8_vcvt_ukernel__scalar_lrintf_x1(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float scale = params->scalar_lrintf.scale;
  const float lo_bound = params->scalar_lrintf.output_min_less_zero_point;
  const float hi_bound = params->scalar_lrintf.output_max_less_zero_point;
  const int32_t zero_point = params->scalar_lrintf.output_zero_point;

  // batch counts bytes and is a nonzero multiple of sizeof(float).
  for (size_t n = batch / sizeof(float); n != 0; n--) {
    float v = *input++ * scale;
    // Clamp before rounding so the result fits the quantized range.
    v = math_min_f32(math_max_f32(v, lo_bound), hi_bound);
    const int32_t quantized = (int32_t) lrintf(v) + zero_point;
    *output++ = (uint8_t) quantized;
  }
}
| 1,345 | 27.041667 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-scalar-lrintf-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-lrintf.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Converts a batch of FP32 values to QU8, 2 elements per main-loop iteration
// ("lrintf" variant): scale, clamp in the float domain, round with lrintf,
// then add the output zero point.
void xnn_f32_qu8_vcvt_ukernel__scalar_lrintf_x2(
    size_t batch,                 // number of input BYTES; nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float vscale = params->scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;

  // Main loop, unrolled by 2.
  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    float vx0 = input[0];
    float vx1 = input[1];
    input += 2;

    vx0 *= vscale;
    vx1 *= vscale;

    // Clamp before rounding so the result fits the quantized range.
    vx0 = math_max_f32(vx0, voutput_min_less_zero_point);
    vx1 = math_max_f32(vx1, voutput_min_less_zero_point);

    vx0 = math_min_f32(vx0, voutput_max_less_zero_point);
    vx1 = math_min_f32(vx1, voutput_max_less_zero_point);

    int32_t vy0 = (int32_t) lrintf(vx0);
    int32_t vy1 = (int32_t) lrintf(vx1);

    vy0 += voutput_zero_point;
    vy1 += voutput_zero_point;

    output[0] = (uint8_t) vy0;
    output[1] = (uint8_t) vy1;
    output += 2;
  }
  // Tail: at most 1 leftover element.
  if XNN_UNLIKELY(batch != 0) {
    float vx = *input;
    vx *= vscale;
    vx = math_max_f32(vx, voutput_min_less_zero_point);
    vx = math_min_f32(vx, voutput_max_less_zero_point);
    int32_t vy = (int32_t) lrintf(vx);
    vy += voutput_zero_point;
    *output = (uint8_t) vy;
  }
}
| 1,955 | 26.942857 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-scalar-lrintf-x3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-lrintf.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Converts a batch of FP32 values to QU8, 3 elements per main-loop iteration
// ("lrintf" variant): scale, clamp in the float domain, round with lrintf,
// then add the output zero point.
void xnn_f32_qu8_vcvt_ukernel__scalar_lrintf_x3(
    size_t batch,                 // number of input BYTES; nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float vscale = params->scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;

  // Main loop, unrolled by 3.
  for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
    float vx0 = input[0];
    float vx1 = input[1];
    float vx2 = input[2];
    input += 3;

    vx0 *= vscale;
    vx1 *= vscale;
    vx2 *= vscale;

    // Clamp before rounding so the result fits the quantized range.
    vx0 = math_max_f32(vx0, voutput_min_less_zero_point);
    vx1 = math_max_f32(vx1, voutput_min_less_zero_point);
    vx2 = math_max_f32(vx2, voutput_min_less_zero_point);

    vx0 = math_min_f32(vx0, voutput_max_less_zero_point);
    vx1 = math_min_f32(vx1, voutput_max_less_zero_point);
    vx2 = math_min_f32(vx2, voutput_max_less_zero_point);

    int32_t vy0 = (int32_t) lrintf(vx0);
    int32_t vy1 = (int32_t) lrintf(vx1);
    int32_t vy2 = (int32_t) lrintf(vx2);

    vy0 += voutput_zero_point;
    vy1 += voutput_zero_point;
    vy2 += voutput_zero_point;

    output[0] = (uint8_t) vy0;
    output[1] = (uint8_t) vy1;
    output[2] = (uint8_t) vy2;
    output += 3;
  }
  // Tail: 1-2 leftover elements, processed one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      float vx = *input++;
      vx *= vscale;
      vx = math_max_f32(vx, voutput_min_less_zero_point);
      vx = math_min_f32(vx, voutput_max_less_zero_point);
      int32_t vy = (int32_t) lrintf(vx);
      vy += voutput_zero_point;
      *output++ = (uint8_t) vy;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,303 | 27.444444 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-scalar-lrintf-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-lrintf.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Converts a batch of FP32 values to QU8, 4 elements per main-loop iteration
// ("lrintf" variant): scale, clamp in the float domain, round with lrintf,
// then add the output zero point.
void xnn_f32_qu8_vcvt_ukernel__scalar_lrintf_x4(
    size_t batch,                 // number of input BYTES; nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float vscale = params->scalar_lrintf.scale;
  const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;

  // Main loop, unrolled by 4.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    float vx0 = input[0];
    float vx1 = input[1];
    float vx2 = input[2];
    float vx3 = input[3];
    input += 4;

    vx0 *= vscale;
    vx1 *= vscale;
    vx2 *= vscale;
    vx3 *= vscale;

    // Clamp before rounding so the result fits the quantized range.
    vx0 = math_max_f32(vx0, voutput_min_less_zero_point);
    vx1 = math_max_f32(vx1, voutput_min_less_zero_point);
    vx2 = math_max_f32(vx2, voutput_min_less_zero_point);
    vx3 = math_max_f32(vx3, voutput_min_less_zero_point);

    vx0 = math_min_f32(vx0, voutput_max_less_zero_point);
    vx1 = math_min_f32(vx1, voutput_max_less_zero_point);
    vx2 = math_min_f32(vx2, voutput_max_less_zero_point);
    vx3 = math_min_f32(vx3, voutput_max_less_zero_point);

    int32_t vy0 = (int32_t) lrintf(vx0);
    int32_t vy1 = (int32_t) lrintf(vx1);
    int32_t vy2 = (int32_t) lrintf(vx2);
    int32_t vy3 = (int32_t) lrintf(vx3);

    vy0 += voutput_zero_point;
    vy1 += voutput_zero_point;
    vy2 += voutput_zero_point;
    vy3 += voutput_zero_point;

    output[0] = (uint8_t) vy0;
    output[1] = (uint8_t) vy1;
    output[2] = (uint8_t) vy2;
    output[3] = (uint8_t) vy3;
    output += 4;
  }
  // Tail: 1-3 leftover elements, processed one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      float vx = *input++;
      vx *= vscale;
      vx = math_max_f32(vx, voutput_min_less_zero_point);
      vx = math_min_f32(vx, voutput_max_less_zero_point);
      int32_t vy = (int32_t) lrintf(vx);
      vy += voutput_zero_point;
      *output++ = (uint8_t) vy;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,567 | 28.181818 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-sse2-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// Converts a batch of FP32 values to QU8 with SSE2, 16 elements per main-loop
// iteration: scale, clamp the UPPER bound in the float domain, convert with
// CVTPS2DQ, saturating-pack to s16, add the zero point (saturating), pack to
// u8 with unsigned saturation, then apply the LOWER bound with max_epu8.
void xnn_f32_qu8_vcvt_ukernel__sse2_x16(
    size_t batch,                 // number of input BYTES; nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->sse2.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);

  // Main loop: four 4-float vectors in, one 16-byte store out.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m128 vx0123 = _mm_loadu_ps(input);
    __m128 vx4567 = _mm_loadu_ps(input + 4);
    __m128 vx89AB = _mm_loadu_ps(input + 8);
    __m128 vxCDEF = _mm_loadu_ps(input + 12);
    input += 16;

    vx0123 = _mm_mul_ps(vx0123, vscale);
    vx4567 = _mm_mul_ps(vx4567, vscale);
    vx89AB = _mm_mul_ps(vx89AB, vscale);
    vxCDEF = _mm_mul_ps(vxCDEF, vscale);

    // Upper clamp in the float domain (lower clamp is done on u8 at the end).
    vx0123 = _mm_min_ps(vx0123, voutput_max_less_zero_point);
    vx4567 = _mm_min_ps(vx4567, voutput_max_less_zero_point);
    vx89AB = _mm_min_ps(vx89AB, voutput_max_less_zero_point);
    vxCDEF = _mm_min_ps(vxCDEF, voutput_max_less_zero_point);

    // Float -> int32 conversion; rounds per current MXCSR mode (default:
    // round to nearest even).
    const __m128i vy0123 = _mm_cvtps_epi32(vx0123);
    const __m128i vy4567 = _mm_cvtps_epi32(vx4567);
    const __m128i vy89AB = _mm_cvtps_epi32(vx89AB);
    const __m128i vyCDEF = _mm_cvtps_epi32(vxCDEF);

    // Saturating pack s32 -> s16, then saturating add of the zero point.
    __m128i vy01234567 = _mm_packs_epi32(vy0123, vy4567);
    __m128i vy89ABCDEF = _mm_packs_epi32(vy89AB, vyCDEF);

    vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
    vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);

    // Saturating pack s16 -> u8, then apply the lower output bound.
    __m128i vy0123456789ABCDEF = _mm_packus_epi16(vy01234567, vy89ABCDEF);
    vy0123456789ABCDEF = _mm_max_epu8(vy0123456789ABCDEF, voutput_min);

    _mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
    output += 16;
  }
  // Secondary loop: 8 elements per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m128 vx_lo = _mm_loadu_ps(input);
    __m128 vx_hi = _mm_loadu_ps(input + 4);
    input += 8;

    vx_lo = _mm_mul_ps(vx_lo, vscale);
    vx_hi = _mm_mul_ps(vx_hi, vscale);

    vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
    vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);

    const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
    const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);

    __m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
    vy = _mm_adds_epi16(vy, voutput_zero_point);
    vy = _mm_packus_epi16(vy, vy);
    vy = _mm_max_epu8(vy, voutput_min);

    _mm_storel_epi64((__m128i*) output, vy);
    output += 8;
  }
  // Tail: 1-7 leftover elements. Loads may read past the end of the input
  // buffer (permitted via XNN_OOB_READS).
  if XNN_UNLIKELY(batch != 0) {
    __m128 vx_lo = _mm_loadu_ps(input);
    // When <= 4 elements remain, x_hi aliases input (same 4 floats reloaded);
    // otherwise it points at the next 4 floats.
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    __m128 vx_hi = _mm_loadu_ps(x_hi);

    vx_lo = _mm_mul_ps(vx_lo, vscale);
    vx_hi = _mm_mul_ps(vx_hi, vscale);

    vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
    vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);

    const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
    const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);

    __m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
    vy = _mm_adds_epi16(vy, voutput_zero_point);
    vy = _mm_packus_epi16(vy, vy);
    vy = _mm_max_epu8(vy, voutput_min);

    // Store 4, 2, then 1 bytes according to the remaining element count.
    if (batch & (4 * sizeof(float))) {
      unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
      output += 4;
      vy = _mm_srli_epi64(vy, 32);
    }
    {
      uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
      if (batch & (2 * sizeof(float))) {
        unaligned_store_u16(output, (uint16_t) vy_lo);
        output += 2;
        vy_lo >>= 16;
      }
      if (batch & (1 * sizeof(float))) {
        *output = (uint8_t) vy_lo;
      }
    }
  }
}
| 4,290 | 32.007692 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-sse2-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// Converts a batch of f32 values to quantized u8 (scale + zero-point) with SSE2,
// processing 24 elements per main-loop iteration.
// Pipeline per element: x*scale -> clamp upper bound in float domain ->
// round/convert to i32 -> saturating pack to i16 -> add zero point (saturating) ->
// saturating pack to u8 -> clamp lower bound with max_epu8.
void xnn_f32_qu8_vcvt_ukernel__sse2_x24(
    size_t batch,  // number of input BYTES; must be a nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcasted conversion parameters (pre-replicated in the params struct).
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  // Upper clamp is applied before rounding, in the float domain.
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->sse2.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
  // Main loop: 24 floats (six 4-lane vectors) per iteration.
  for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
    __m128 vx0123 = _mm_loadu_ps(input);
    __m128 vx4567 = _mm_loadu_ps(input + 4);
    __m128 vx89AB = _mm_loadu_ps(input + 8);
    __m128 vxCDEF = _mm_loadu_ps(input + 12);
    __m128 vxGHIJ = _mm_loadu_ps(input + 16);
    __m128 vxKLMN = _mm_loadu_ps(input + 20);
    input += 24;
    vx0123 = _mm_mul_ps(vx0123, vscale);
    vx4567 = _mm_mul_ps(vx4567, vscale);
    vx89AB = _mm_mul_ps(vx89AB, vscale);
    vxCDEF = _mm_mul_ps(vxCDEF, vscale);
    vxGHIJ = _mm_mul_ps(vxGHIJ, vscale);
    vxKLMN = _mm_mul_ps(vxKLMN, vscale);
    vx0123 = _mm_min_ps(vx0123, voutput_max_less_zero_point);
    vx4567 = _mm_min_ps(vx4567, voutput_max_less_zero_point);
    vx89AB = _mm_min_ps(vx89AB, voutput_max_less_zero_point);
    vxCDEF = _mm_min_ps(vxCDEF, voutput_max_less_zero_point);
    vxGHIJ = _mm_min_ps(vxGHIJ, voutput_max_less_zero_point);
    vxKLMN = _mm_min_ps(vxKLMN, voutput_max_less_zero_point);
    // Convert to i32 using the current MXCSR rounding mode (nearest-even by default).
    const __m128i vy0123 = _mm_cvtps_epi32(vx0123);
    const __m128i vy4567 = _mm_cvtps_epi32(vx4567);
    const __m128i vy89AB = _mm_cvtps_epi32(vx89AB);
    const __m128i vyCDEF = _mm_cvtps_epi32(vxCDEF);
    const __m128i vyGHIJ = _mm_cvtps_epi32(vxGHIJ);
    const __m128i vyKLMN = _mm_cvtps_epi32(vxKLMN);
    __m128i vy01234567 = _mm_packs_epi32(vy0123, vy4567);
    __m128i vy89ABCDEF = _mm_packs_epi32(vy89AB, vyCDEF);
    __m128i vyGHIJKLMN = _mm_packs_epi32(vyGHIJ, vyKLMN);
    vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
    vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);
    vyGHIJKLMN = _mm_adds_epi16(vyGHIJKLMN, voutput_zero_point);
    __m128i vy0123456789ABCDEF = _mm_packus_epi16(vy01234567, vy89ABCDEF);
    // Only 8 elements here: duplicate into both halves, store the low 8 bytes below.
    vyGHIJKLMN = _mm_packus_epi16(vyGHIJKLMN, vyGHIJKLMN);
    vy0123456789ABCDEF = _mm_max_epu8(vy0123456789ABCDEF, voutput_min);
    vyGHIJKLMN = _mm_max_epu8(vyGHIJKLMN, voutput_min);
    _mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
    _mm_storel_epi64((__m128i*) (output + 16), vyGHIJKLMN);
    output += 24;
  }
  // Secondary loop: 8 elements at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m128 vx_lo = _mm_loadu_ps(input);
    __m128 vx_hi = _mm_loadu_ps(input + 4);
    input += 8;
    vx_lo = _mm_mul_ps(vx_lo, vscale);
    vx_hi = _mm_mul_ps(vx_hi, vscale);
    vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
    vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
    const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
    const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
    __m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
    vy = _mm_adds_epi16(vy, voutput_zero_point);
    vy = _mm_packus_epi16(vy, vy);
    vy = _mm_max_epu8(vy, voutput_min);
    _mm_storel_epi64((__m128i*) output, vy);
    output += 8;
  }
  // Tail: 1..7 remaining elements.
  if XNN_UNLIKELY(batch != 0) {
    __m128 vx_lo = _mm_loadu_ps(input);
    // If >= 4 elements remain, x_hi points 4 floats ahead; otherwise it aliases
    // input and the duplicate lanes are discarded by the partial stores below.
    // Reads past the logical end are permitted by XNN_OOB_READS.
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    __m128 vx_hi = _mm_loadu_ps(x_hi);
    vx_lo = _mm_mul_ps(vx_lo, vscale);
    vx_hi = _mm_mul_ps(vx_hi, vscale);
    vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
    vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
    const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
    const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
    __m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
    vy = _mm_adds_epi16(vy, voutput_zero_point);
    vy = _mm_packus_epi16(vy, vy);
    vy = _mm_max_epu8(vy, voutput_min);
    // Store 4, then 2, then 1 bytes depending on the remaining count,
    // shifting consumed bytes out of the low lane between stores.
    if (batch & (4 * sizeof(float))) {
      unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
      output += 4;
      vy = _mm_srli_epi64(vy, 32);
    }
    {
      uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
      if (batch & (2 * sizeof(float))) {
        unaligned_store_u16(output, (uint16_t) vy_lo);
        output += 2;
        vy_lo >>= 16;
      }
      if (batch & (1 * sizeof(float))) {
        *output = (uint8_t) vy_lo;
      }
    }
  }
}
| 4,990 | 33.902098 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-sse2-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// Converts a batch of f32 values to quantized u8 (scale + zero-point) with SSE2,
// processing 32 elements per main-loop iteration.
// Pipeline per element: x*scale -> clamp upper bound in float domain ->
// round/convert to i32 -> saturating pack to i16 -> add zero point (saturating) ->
// saturating pack to u8 -> clamp lower bound with max_epu8.
void xnn_f32_qu8_vcvt_ukernel__sse2_x32(
    size_t batch,  // number of input BYTES; must be a nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcasted conversion parameters (pre-replicated in the params struct).
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->sse2.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
  // Main loop: 32 floats (eight 4-lane vectors) per iteration.
  for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
    __m128 vx0123 = _mm_loadu_ps(input);
    __m128 vx4567 = _mm_loadu_ps(input + 4);
    __m128 vx89AB = _mm_loadu_ps(input + 8);
    __m128 vxCDEF = _mm_loadu_ps(input + 12);
    __m128 vxGHIJ = _mm_loadu_ps(input + 16);
    __m128 vxKLMN = _mm_loadu_ps(input + 20);
    __m128 vxOPQR = _mm_loadu_ps(input + 24);
    __m128 vxSTUV = _mm_loadu_ps(input + 28);
    input += 32;
    vx0123 = _mm_mul_ps(vx0123, vscale);
    vx4567 = _mm_mul_ps(vx4567, vscale);
    vx89AB = _mm_mul_ps(vx89AB, vscale);
    vxCDEF = _mm_mul_ps(vxCDEF, vscale);
    vxGHIJ = _mm_mul_ps(vxGHIJ, vscale);
    vxKLMN = _mm_mul_ps(vxKLMN, vscale);
    vxOPQR = _mm_mul_ps(vxOPQR, vscale);
    vxSTUV = _mm_mul_ps(vxSTUV, vscale);
    vx0123 = _mm_min_ps(vx0123, voutput_max_less_zero_point);
    vx4567 = _mm_min_ps(vx4567, voutput_max_less_zero_point);
    vx89AB = _mm_min_ps(vx89AB, voutput_max_less_zero_point);
    vxCDEF = _mm_min_ps(vxCDEF, voutput_max_less_zero_point);
    vxGHIJ = _mm_min_ps(vxGHIJ, voutput_max_less_zero_point);
    vxKLMN = _mm_min_ps(vxKLMN, voutput_max_less_zero_point);
    vxOPQR = _mm_min_ps(vxOPQR, voutput_max_less_zero_point);
    vxSTUV = _mm_min_ps(vxSTUV, voutput_max_less_zero_point);
    // Convert to i32 using the current MXCSR rounding mode (nearest-even by default).
    const __m128i vy0123 = _mm_cvtps_epi32(vx0123);
    const __m128i vy4567 = _mm_cvtps_epi32(vx4567);
    const __m128i vy89AB = _mm_cvtps_epi32(vx89AB);
    const __m128i vyCDEF = _mm_cvtps_epi32(vxCDEF);
    const __m128i vyGHIJ = _mm_cvtps_epi32(vxGHIJ);
    const __m128i vyKLMN = _mm_cvtps_epi32(vxKLMN);
    const __m128i vyOPQR = _mm_cvtps_epi32(vxOPQR);
    const __m128i vySTUV = _mm_cvtps_epi32(vxSTUV);
    __m128i vy01234567 = _mm_packs_epi32(vy0123, vy4567);
    __m128i vy89ABCDEF = _mm_packs_epi32(vy89AB, vyCDEF);
    __m128i vyGHIJKLMN = _mm_packs_epi32(vyGHIJ, vyKLMN);
    __m128i vyOPQRSTUV = _mm_packs_epi32(vyOPQR, vySTUV);
    vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
    vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);
    vyGHIJKLMN = _mm_adds_epi16(vyGHIJKLMN, voutput_zero_point);
    vyOPQRSTUV = _mm_adds_epi16(vyOPQRSTUV, voutput_zero_point);
    __m128i vy0123456789ABCDEF = _mm_packus_epi16(vy01234567, vy89ABCDEF);
    __m128i vyGHIJKLMNOPQRSTUV = _mm_packus_epi16(vyGHIJKLMN, vyOPQRSTUV);
    vy0123456789ABCDEF = _mm_max_epu8(vy0123456789ABCDEF, voutput_min);
    vyGHIJKLMNOPQRSTUV = _mm_max_epu8(vyGHIJKLMNOPQRSTUV, voutput_min);
    _mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
    _mm_storeu_si128((__m128i*) (output + 16), vyGHIJKLMNOPQRSTUV);
    output += 32;
  }
  // Secondary loop: 8 elements at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m128 vx_lo = _mm_loadu_ps(input);
    __m128 vx_hi = _mm_loadu_ps(input + 4);
    input += 8;
    vx_lo = _mm_mul_ps(vx_lo, vscale);
    vx_hi = _mm_mul_ps(vx_hi, vscale);
    vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
    vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
    const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
    const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
    __m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
    vy = _mm_adds_epi16(vy, voutput_zero_point);
    vy = _mm_packus_epi16(vy, vy);
    vy = _mm_max_epu8(vy, voutput_min);
    _mm_storel_epi64((__m128i*) output, vy);
    output += 8;
  }
  // Tail: 1..7 remaining elements.
  if XNN_UNLIKELY(batch != 0) {
    __m128 vx_lo = _mm_loadu_ps(input);
    // If >= 4 elements remain, x_hi points 4 floats ahead; otherwise it aliases
    // input and the duplicate lanes are discarded by the partial stores below.
    // Reads past the logical end are permitted by XNN_OOB_READS.
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    __m128 vx_hi = _mm_loadu_ps(x_hi);
    vx_lo = _mm_mul_ps(vx_lo, vscale);
    vx_hi = _mm_mul_ps(vx_hi, vscale);
    vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
    vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
    const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
    const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
    __m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
    vy = _mm_adds_epi16(vy, voutput_zero_point);
    vy = _mm_packus_epi16(vy, vy);
    vy = _mm_max_epu8(vy, voutput_min);
    // Store 4, then 2, then 1 bytes depending on the remaining count,
    // shifting consumed bytes out of the low lane between stores.
    if (batch & (4 * sizeof(float))) {
      unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
      output += 4;
      vy = _mm_srli_epi64(vy, 32);
    }
    {
      uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
      if (batch & (2 * sizeof(float))) {
        unaligned_store_u16(output, (uint16_t) vy_lo);
        output += 2;
        vy_lo >>= 16;
      }
      if (batch & (1 * sizeof(float))) {
        *output = (uint8_t) vy_lo;
      }
    }
  }
}
| 5,555 | 35.313725 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-sse2-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
// Converts a batch of f32 values to quantized u8 (scale + zero-point) with SSE2,
// processing 8 elements per loop iteration.
// Pipeline per element: x*scale -> clamp upper bound in float domain ->
// round/convert to i32 -> saturating pack to i16 -> add zero point (saturating) ->
// saturating pack to u8 -> clamp lower bound with max_epu8.
void xnn_f32_qu8_vcvt_ukernel__sse2_x8(
    size_t batch,  // number of input BYTES; must be a nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcasted conversion parameters (pre-replicated in the params struct).
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->sse2.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
  // Main loop: 8 floats (two 4-lane vectors) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m128 vx_lo = _mm_loadu_ps(input);
    __m128 vx_hi = _mm_loadu_ps(input + 4);
    input += 8;
    vx_lo = _mm_mul_ps(vx_lo, vscale);
    vx_hi = _mm_mul_ps(vx_hi, vscale);
    vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
    vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
    // Convert to i32 using the current MXCSR rounding mode (nearest-even by default).
    const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
    const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
    __m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
    vy = _mm_adds_epi16(vy, voutput_zero_point);
    vy = _mm_packus_epi16(vy, vy);
    vy = _mm_max_epu8(vy, voutput_min);
    _mm_storel_epi64((__m128i*) output, vy);
    output += 8;
  }
  // Tail: 1..7 remaining elements.
  if XNN_UNLIKELY(batch != 0) {
    __m128 vx_lo = _mm_loadu_ps(input);
    // If >= 4 elements remain, x_hi points 4 floats ahead; otherwise it aliases
    // input and the duplicate lanes are discarded by the partial stores below.
    // Reads past the logical end are permitted by XNN_OOB_READS.
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    __m128 vx_hi = _mm_loadu_ps(x_hi);
    vx_lo = _mm_mul_ps(vx_lo, vscale);
    vx_hi = _mm_mul_ps(vx_hi, vscale);
    vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
    vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
    const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
    const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
    __m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
    vy = _mm_adds_epi16(vy, voutput_zero_point);
    vy = _mm_packus_epi16(vy, vy);
    vy = _mm_max_epu8(vy, voutput_min);
    // Store 4, then 2, then 1 bytes depending on the remaining count,
    // shifting consumed bytes out of the low lane between stores.
    if (batch & (4 * sizeof(float))) {
      unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
      output += 4;
      vy = _mm_srli_epi64(vy, 32);
    }
    {
      uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
      if (batch & (2 * sizeof(float))) {
        unaligned_store_u16(output, (uint16_t) vy_lo);
        output += 2;
        vy_lo >>= 16;
      }
      if (batch & (1 * sizeof(float))) {
        *output = (uint8_t) vy_lo;
      }
    }
  }
}
| 2,921 | 30.085106 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-wasm-fmagic-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Scalar f32 -> quantized u8 conversion (one element per iteration) using the
// "float magic bias" trick: after clamping, adding magic_bias places the rounded
// integer result in the low bits of the float, which is then recovered by
// reinterpreting the bits and subtracting magic_bias_less_zero_point.
void xnn_f32_qu8_vcvt_ukernel__wasm_fmagic_x1(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Conversion constants; the min/max bounds are pre-shifted by the zero point.
  const float scale = params->scalar_fmagic.scale;
  const float min_less_zp = params->scalar_fmagic.output_min_less_zero_point;
  const float max_less_zp = params->scalar_fmagic.output_max_less_zero_point;
  const float magic_bias = params->scalar_fmagic.magic_bias;
  const int32_t magic_bias_less_zp = params->scalar_fmagic.magic_bias_less_zero_point;
  for (; batch != 0; batch -= sizeof(float)) {
    float val = *input++ * scale;
    val = __builtin_wasm_max_f32(val, min_less_zp);
    val = __builtin_wasm_min_f32(val, max_less_zp);
    val += magic_bias;
    const int32_t quantized = (int32_t) float_as_uint32(val) - magic_bias_less_zp;
    *output++ = (uint8_t) quantized;
  }
}
| 1,466 | 28.938776 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-wasm-fmagic-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Scalar f32 -> quantized u8 conversion, two elements per iteration, using the
// "float magic bias" trick: after clamping, adding magic_bias places the rounded
// integer result in the low bits of the float, which is then recovered by
// reinterpreting the bits and subtracting magic_bias_less_zero_point.
void xnn_f32_qu8_vcvt_ukernel__wasm_fmagic_x2(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Conversion constants; the min/max bounds are pre-shifted by the zero point.
  const float scale = params->scalar_fmagic.scale;
  const float min_less_zp = params->scalar_fmagic.output_min_less_zero_point;
  const float max_less_zp = params->scalar_fmagic.output_max_less_zero_point;
  const float magic_bias = params->scalar_fmagic.magic_bias;
  const int32_t magic_bias_less_zp = params->scalar_fmagic.magic_bias_less_zero_point;
  // Main loop: two elements per iteration.
  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    float x0 = input[0] * scale;
    float x1 = input[1] * scale;
    input += 2;
    x0 = __builtin_wasm_max_f32(x0, min_less_zp);
    x1 = __builtin_wasm_max_f32(x1, min_less_zp);
    x0 = __builtin_wasm_min_f32(x0, max_less_zp);
    x1 = __builtin_wasm_min_f32(x1, max_less_zp);
    x0 += magic_bias;
    x1 += magic_bias;
    output[0] = (uint8_t) ((int32_t) float_as_uint32(x0) - magic_bias_less_zp);
    output[1] = (uint8_t) ((int32_t) float_as_uint32(x1) - magic_bias_less_zp);
    output += 2;
  }
  // Tail: at most one element remains.
  if XNN_UNLIKELY(batch != 0) {
    float x = *input * scale;
    x = __builtin_wasm_max_f32(x, min_less_zp);
    x = __builtin_wasm_min_f32(x, max_less_zp);
    x += magic_bias;
    *output = (uint8_t) ((int32_t) float_as_uint32(x) - magic_bias_less_zp);
  }
}
| 2,201 | 28.756757 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-wasm-fmagic-x3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Scalar f32 -> quantized u8 conversion, three elements per iteration, using the
// "float magic bias" trick: after clamping, adding magic_bias places the rounded
// integer result in the low bits of the float, which is then recovered by
// reinterpreting the bits and subtracting magic_bias_less_zero_point.
void xnn_f32_qu8_vcvt_ukernel__wasm_fmagic_x3(
    size_t batch,  // number of input BYTES; must be a nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Conversion constants; the min/max bounds are pre-shifted by the zero point.
  const float vscale = params->scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_zero_point = params->scalar_fmagic.magic_bias_less_zero_point;
  // Main loop: three elements per iteration.
  for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
    float vx0 = input[0];
    float vx1 = input[1];
    float vx2 = input[2];
    input += 3;
    vx0 *= vscale;
    vx1 *= vscale;
    vx2 *= vscale;
    vx0 = __builtin_wasm_max_f32(vx0, voutput_min_less_zero_point);
    vx1 = __builtin_wasm_max_f32(vx1, voutput_min_less_zero_point);
    vx2 = __builtin_wasm_max_f32(vx2, voutput_min_less_zero_point);
    vx0 = __builtin_wasm_min_f32(vx0, voutput_max_less_zero_point);
    vx1 = __builtin_wasm_min_f32(vx1, voutput_max_less_zero_point);
    vx2 = __builtin_wasm_min_f32(vx2, voutput_max_less_zero_point);
    vx0 += vmagic_bias;
    vx1 += vmagic_bias;
    vx2 += vmagic_bias;
    // Reinterpret the biased float bits to recover the rounded integer.
    int32_t vy0 = (int32_t) float_as_uint32(vx0);
    int32_t vy1 = (int32_t) float_as_uint32(vx1);
    int32_t vy2 = (int32_t) float_as_uint32(vx2);
    vy0 -= vmagic_bias_less_zero_point;
    vy1 -= vmagic_bias_less_zero_point;
    vy2 -= vmagic_bias_less_zero_point;
    output[0] = (uint8_t) vy0;
    output[1] = (uint8_t) vy1;
    output[2] = (uint8_t) vy2;
    output += 3;
  }
  // Tail: 1 or 2 remaining elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      float vx = *input++;
      vx *= vscale;
      vx = __builtin_wasm_max_f32(vx, voutput_min_less_zero_point);
      vx = __builtin_wasm_min_f32(vx, voutput_max_less_zero_point);
      vx += vmagic_bias;
      int32_t vy = (int32_t) float_as_uint32(vx);
      vy -= vmagic_bias_less_zero_point;
      *output++ = (uint8_t) vy;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,613 | 29.395349 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-wasm-fmagic-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
// Scalar f32 -> quantized u8 conversion, four elements per iteration, using the
// "float magic bias" trick: after clamping, adding magic_bias places the rounded
// integer result in the low bits of the float, which is then recovered by
// reinterpreting the bits and subtracting magic_bias_less_zero_point.
void xnn_f32_qu8_vcvt_ukernel__wasm_fmagic_x4(
    size_t batch,  // number of input BYTES; must be a nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Conversion constants; the min/max bounds are pre-shifted by the zero point.
  const float vscale = params->scalar_fmagic.scale;
  const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_zero_point = params->scalar_fmagic.magic_bias_less_zero_point;
  // Main loop: four elements per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    float vx0 = input[0];
    float vx1 = input[1];
    float vx2 = input[2];
    float vx3 = input[3];
    input += 4;
    vx0 *= vscale;
    vx1 *= vscale;
    vx2 *= vscale;
    vx3 *= vscale;
    vx0 = __builtin_wasm_max_f32(vx0, voutput_min_less_zero_point);
    vx1 = __builtin_wasm_max_f32(vx1, voutput_min_less_zero_point);
    vx2 = __builtin_wasm_max_f32(vx2, voutput_min_less_zero_point);
    vx3 = __builtin_wasm_max_f32(vx3, voutput_min_less_zero_point);
    vx0 = __builtin_wasm_min_f32(vx0, voutput_max_less_zero_point);
    vx1 = __builtin_wasm_min_f32(vx1, voutput_max_less_zero_point);
    vx2 = __builtin_wasm_min_f32(vx2, voutput_max_less_zero_point);
    vx3 = __builtin_wasm_min_f32(vx3, voutput_max_less_zero_point);
    vx0 += vmagic_bias;
    vx1 += vmagic_bias;
    vx2 += vmagic_bias;
    vx3 += vmagic_bias;
    // Reinterpret the biased float bits to recover the rounded integer.
    int32_t vy0 = (int32_t) float_as_uint32(vx0);
    int32_t vy1 = (int32_t) float_as_uint32(vx1);
    int32_t vy2 = (int32_t) float_as_uint32(vx2);
    int32_t vy3 = (int32_t) float_as_uint32(vx3);
    vy0 -= vmagic_bias_less_zero_point;
    vy1 -= vmagic_bias_less_zero_point;
    vy2 -= vmagic_bias_less_zero_point;
    vy3 -= vmagic_bias_less_zero_point;
    output[0] = (uint8_t) vy0;
    output[1] = (uint8_t) vy1;
    output[2] = (uint8_t) vy2;
    output[3] = (uint8_t) vy3;
    output += 4;
  }
  // Tail: 1..3 remaining elements, one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      float vx = *input++;
      vx *= vscale;
      vx = __builtin_wasm_max_f32(vx, voutput_min_less_zero_point);
      vx = __builtin_wasm_min_f32(vx, voutput_max_less_zero_point);
      vx += vmagic_bias;
      int32_t vy = (int32_t) float_as_uint32(vx);
      vy -= vmagic_bias_less_zero_point;
      *output++ = (uint8_t) vy;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,939 | 30.276596 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-wasmsimd-cvt-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-cvt.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts a batch of f32 values to quantized u8 with WAsm SIMD128,
// processing 16 elements per main-loop iteration.
// Pipeline per element: x*scale -> round to nearest (f32x4.nearest) ->
// saturating truncate to i32 -> saturating narrow to i16 -> add zero point
// (saturating) -> saturating narrow to u8 -> clamp to [output_min, output_max].
void xnn_f32_qu8_vcvt_ukernel__wasmsimd_cvt_x16(
    size_t batch,  // number of input BYTES; must be a nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // load64_splat broadcasts 64 bits into both halves of the vector;
  // presumably the params store each constant as a replicated 8-byte group — TODO confirm layout.
  const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_cvt.scale);
  const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_cvt.output_zero_point);
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd_cvt.output_min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_cvt.output_max);
  // Main loop: 16 floats (four 4-lane vectors) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    v128_t vx0123 = wasm_v128_load(input);
    v128_t vx4567 = wasm_v128_load(input + 4);
    v128_t vx89AB = wasm_v128_load(input + 8);
    v128_t vxCDEF = wasm_v128_load(input + 12);
    input += 16;
    vx0123 = wasm_f32x4_mul(vx0123, vscale);
    vx4567 = wasm_f32x4_mul(vx4567, vscale);
    vx89AB = wasm_f32x4_mul(vx89AB, vscale);
    vxCDEF = wasm_f32x4_mul(vxCDEF, vscale);
    // Round to nearest integer before the saturating truncation.
    vx0123 = wasm_f32x4_nearest(vx0123);
    vx4567 = wasm_f32x4_nearest(vx4567);
    vx89AB = wasm_f32x4_nearest(vx89AB);
    vxCDEF = wasm_f32x4_nearest(vxCDEF);
    v128_t vacc0123 = wasm_i32x4_trunc_sat_f32x4(vx0123);
    v128_t vacc4567 = wasm_i32x4_trunc_sat_f32x4(vx4567);
    v128_t vacc89AB = wasm_i32x4_trunc_sat_f32x4(vx89AB);
    v128_t vaccCDEF = wasm_i32x4_trunc_sat_f32x4(vxCDEF);
    v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
    v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
    vacc01234567 = wasm_i16x8_add_sat(vacc01234567, voutput_zero_point);
    vacc89ABCDEF = wasm_i16x8_add_sat(vacc89ABCDEF, voutput_zero_point);
    v128_t vy0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
    // Final clamp to the requested output range.
    vy0123456789ABCDEF = wasm_u8x16_max(vy0123456789ABCDEF, voutput_min);
    vy0123456789ABCDEF = wasm_u8x16_min(vy0123456789ABCDEF, voutput_max);
    wasm_v128_store(output, vy0123456789ABCDEF);
    output += 16;
  }
  // Secondary loop: 8 elements at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    v128_t vx_lo = wasm_v128_load(input);
    v128_t vx_hi = wasm_v128_load(input + 4);
    input += 8;
    vx_lo = wasm_f32x4_mul(vx_lo, vscale);
    vx_hi = wasm_f32x4_mul(vx_hi, vscale);
    vx_lo = wasm_f32x4_nearest(vx_lo);
    vx_hi = wasm_f32x4_nearest(vx_hi);
    v128_t vacc_lo = wasm_i32x4_trunc_sat_f32x4(vx_lo);
    v128_t vacc_hi = wasm_i32x4_trunc_sat_f32x4(vx_hi);
    v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
    vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
    v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
    vy = wasm_u8x16_max(vy, voutput_min);
    vy = wasm_u8x16_min(vy, voutput_max);
    wasm_v128_store64_lane(output, vy, 0);
    output += 8;
  }
  // Tail: 1..7 remaining elements.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    v128_t vx_lo = wasm_v128_load(input);
    // If >= 4 elements remain, x_hi points 4 floats ahead; otherwise it aliases
    // input and the duplicate lanes are discarded by the partial stores below.
    // Reads past the logical end are permitted by XNN_OOB_READS.
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    v128_t vx_hi = wasm_v128_load(x_hi);
    vx_lo = wasm_f32x4_mul(vx_lo, vscale);
    vx_hi = wasm_f32x4_mul(vx_hi, vscale);
    vx_lo = wasm_f32x4_nearest(vx_lo);
    vx_hi = wasm_f32x4_nearest(vx_hi);
    v128_t vacc_lo = wasm_i32x4_trunc_sat_f32x4(vx_lo);
    v128_t vacc_hi = wasm_i32x4_trunc_sat_f32x4(vx_hi);
    v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
    vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
    v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
    vy = wasm_u8x16_max(vy, voutput_min);
    vy = wasm_u8x16_min(vy, voutput_max);
    // Store 4, then 2, then 1 bytes depending on the remaining count,
    // shifting consumed bytes out of lane 0 between stores.
    if (batch & (4 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
      vy = wasm_u64x2_shr(vy, 32);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store16_lane(output, vy, 0);
      vy = wasm_u32x4_shr(vy, 16);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store8_lane(output, vy, 0);
    }
  }
}
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-wasmsimd-cvt-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-cvt.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts a batch of f32 values to quantized u8 with WAsm SIMD128,
// processing 24 elements per main-loop iteration.
// Pipeline per element: x*scale -> round to nearest (f32x4.nearest) ->
// saturating truncate to i32 -> saturating narrow to i16 -> add zero point
// (saturating) -> saturating narrow to u8 -> clamp to [output_min, output_max].
void xnn_f32_qu8_vcvt_ukernel__wasmsimd_cvt_x24(
    size_t batch,  // number of input BYTES; must be a nonzero multiple of sizeof(float)
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // load64_splat broadcasts 64 bits into both halves of the vector;
  // presumably the params store each constant as a replicated 8-byte group — TODO confirm layout.
  const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_cvt.scale);
  const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_cvt.output_zero_point);
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd_cvt.output_min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_cvt.output_max);
  // Main loop: 24 floats (six 4-lane vectors) per iteration.
  for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
    v128_t vx0123 = wasm_v128_load(input);
    v128_t vx4567 = wasm_v128_load(input + 4);
    v128_t vx89AB = wasm_v128_load(input + 8);
    v128_t vxCDEF = wasm_v128_load(input + 12);
    v128_t vxGHIJ = wasm_v128_load(input + 16);
    v128_t vxKLMN = wasm_v128_load(input + 20);
    input += 24;
    vx0123 = wasm_f32x4_mul(vx0123, vscale);
    vx4567 = wasm_f32x4_mul(vx4567, vscale);
    vx89AB = wasm_f32x4_mul(vx89AB, vscale);
    vxCDEF = wasm_f32x4_mul(vxCDEF, vscale);
    vxGHIJ = wasm_f32x4_mul(vxGHIJ, vscale);
    vxKLMN = wasm_f32x4_mul(vxKLMN, vscale);
    // Round to nearest integer before the saturating truncation.
    vx0123 = wasm_f32x4_nearest(vx0123);
    vx4567 = wasm_f32x4_nearest(vx4567);
    vx89AB = wasm_f32x4_nearest(vx89AB);
    vxCDEF = wasm_f32x4_nearest(vxCDEF);
    vxGHIJ = wasm_f32x4_nearest(vxGHIJ);
    vxKLMN = wasm_f32x4_nearest(vxKLMN);
    v128_t vacc0123 = wasm_i32x4_trunc_sat_f32x4(vx0123);
    v128_t vacc4567 = wasm_i32x4_trunc_sat_f32x4(vx4567);
    v128_t vacc89AB = wasm_i32x4_trunc_sat_f32x4(vx89AB);
    v128_t vaccCDEF = wasm_i32x4_trunc_sat_f32x4(vxCDEF);
    v128_t vaccGHIJ = wasm_i32x4_trunc_sat_f32x4(vxGHIJ);
    v128_t vaccKLMN = wasm_i32x4_trunc_sat_f32x4(vxKLMN);
    v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
    v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
    v128_t vaccGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
    vacc01234567 = wasm_i16x8_add_sat(vacc01234567, voutput_zero_point);
    vacc89ABCDEF = wasm_i16x8_add_sat(vacc89ABCDEF, voutput_zero_point);
    vaccGHIJKLMN = wasm_i16x8_add_sat(vaccGHIJKLMN, voutput_zero_point);
    v128_t vy0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
    // Only 8 elements here: duplicate into both halves, store the low 8 bytes below.
    v128_t vyGHIJKLMN = wasm_u8x16_narrow_i16x8(vaccGHIJKLMN, vaccGHIJKLMN);
    vy0123456789ABCDEF = wasm_u8x16_max(vy0123456789ABCDEF, voutput_min);
    vyGHIJKLMN = wasm_u8x16_max(vyGHIJKLMN, voutput_min);
    vy0123456789ABCDEF = wasm_u8x16_min(vy0123456789ABCDEF, voutput_max);
    vyGHIJKLMN = wasm_u8x16_min(vyGHIJKLMN, voutput_max);
    wasm_v128_store(output, vy0123456789ABCDEF);
    wasm_v128_store64_lane(output + 16, vyGHIJKLMN, 0);
    output += 24;
  }
  // Secondary loop: 8 elements at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    v128_t vx_lo = wasm_v128_load(input);
    v128_t vx_hi = wasm_v128_load(input + 4);
    input += 8;
    vx_lo = wasm_f32x4_mul(vx_lo, vscale);
    vx_hi = wasm_f32x4_mul(vx_hi, vscale);
    vx_lo = wasm_f32x4_nearest(vx_lo);
    vx_hi = wasm_f32x4_nearest(vx_hi);
    v128_t vacc_lo = wasm_i32x4_trunc_sat_f32x4(vx_lo);
    v128_t vacc_hi = wasm_i32x4_trunc_sat_f32x4(vx_hi);
    v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
    vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
    v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
    vy = wasm_u8x16_max(vy, voutput_min);
    vy = wasm_u8x16_min(vy, voutput_max);
    wasm_v128_store64_lane(output, vy, 0);
    output += 8;
  }
  // Tail: 1..7 remaining elements.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    v128_t vx_lo = wasm_v128_load(input);
    // If >= 4 elements remain, x_hi points 4 floats ahead; otherwise it aliases
    // input and the duplicate lanes are discarded by the partial stores below.
    // Reads past the logical end are permitted by XNN_OOB_READS.
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    v128_t vx_hi = wasm_v128_load(x_hi);
    vx_lo = wasm_f32x4_mul(vx_lo, vscale);
    vx_hi = wasm_f32x4_mul(vx_hi, vscale);
    vx_lo = wasm_f32x4_nearest(vx_lo);
    vx_hi = wasm_f32x4_nearest(vx_hi);
    v128_t vacc_lo = wasm_i32x4_trunc_sat_f32x4(vx_lo);
    v128_t vacc_hi = wasm_i32x4_trunc_sat_f32x4(vx_hi);
    v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
    vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
    v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
    vy = wasm_u8x16_max(vy, voutput_min);
    vy = wasm_u8x16_min(vy, voutput_max);
    // Store 4, then 2, then 1 bytes depending on the remaining count,
    // shifting consumed bytes out of lane 0 between stores.
    if (batch & (4 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
      vy = wasm_u64x2_shr(vy, 32);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store16_lane(output, vy, 0);
      vy = wasm_u32x4_shr(vy, 16);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store8_lane(output, vy, 0);
    }
  }
}
| 5,293 | 35.013605 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-wasmsimd-cvt-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-cvt.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts a batch of f32 values to quantized u8 using WAsm SIMD128:
// y = clamp(narrow(round_to_nearest(x * scale)) + zero_point, min, max).
// Processes 32 elements per main-loop iteration, then 8 at a time, then a
// 1..7-element tail. XNN_OOB_READS: the tail may read past the end of input.
void xnn_f32_qu8_vcvt_ukernel__wasmsimd_cvt_x32(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast quantization parameters to all lanes.
  const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_cvt.scale);
  const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_cvt.output_zero_point);
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd_cvt.output_min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_cvt.output_max);
  // Main loop: 32 elements (8 vectors of 4 floats) per iteration.
  for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
    v128_t vx0123 = wasm_v128_load(input);
    v128_t vx4567 = wasm_v128_load(input + 4);
    v128_t vx89AB = wasm_v128_load(input + 8);
    v128_t vxCDEF = wasm_v128_load(input + 12);
    v128_t vxGHIJ = wasm_v128_load(input + 16);
    v128_t vxKLMN = wasm_v128_load(input + 20);
    v128_t vxOPQR = wasm_v128_load(input + 24);
    v128_t vxSTUV = wasm_v128_load(input + 28);
    input += 32;
    // Apply quantization scale.
    vx0123 = wasm_f32x4_mul(vx0123, vscale);
    vx4567 = wasm_f32x4_mul(vx4567, vscale);
    vx89AB = wasm_f32x4_mul(vx89AB, vscale);
    vxCDEF = wasm_f32x4_mul(vxCDEF, vscale);
    vxGHIJ = wasm_f32x4_mul(vxGHIJ, vscale);
    vxKLMN = wasm_f32x4_mul(vxKLMN, vscale);
    vxOPQR = wasm_f32x4_mul(vxOPQR, vscale);
    vxSTUV = wasm_f32x4_mul(vxSTUV, vscale);
    // Round to the nearest integer (ties handled by f32x4.nearest).
    vx0123 = wasm_f32x4_nearest(vx0123);
    vx4567 = wasm_f32x4_nearest(vx4567);
    vx89AB = wasm_f32x4_nearest(vx89AB);
    vxCDEF = wasm_f32x4_nearest(vxCDEF);
    vxGHIJ = wasm_f32x4_nearest(vxGHIJ);
    vxKLMN = wasm_f32x4_nearest(vxKLMN);
    vxOPQR = wasm_f32x4_nearest(vxOPQR);
    vxSTUV = wasm_f32x4_nearest(vxSTUV);
    // Convert to int32 with saturation.
    v128_t vacc0123 = wasm_i32x4_trunc_sat_f32x4(vx0123);
    v128_t vacc4567 = wasm_i32x4_trunc_sat_f32x4(vx4567);
    v128_t vacc89AB = wasm_i32x4_trunc_sat_f32x4(vx89AB);
    v128_t vaccCDEF = wasm_i32x4_trunc_sat_f32x4(vxCDEF);
    v128_t vaccGHIJ = wasm_i32x4_trunc_sat_f32x4(vxGHIJ);
    v128_t vaccKLMN = wasm_i32x4_trunc_sat_f32x4(vxKLMN);
    v128_t vaccOPQR = wasm_i32x4_trunc_sat_f32x4(vxOPQR);
    v128_t vaccSTUV = wasm_i32x4_trunc_sat_f32x4(vxSTUV);
    // Saturating narrow int32 -> int16.
    v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
    v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
    v128_t vaccGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
    v128_t vaccOPQRSTUV = wasm_i16x8_narrow_i32x4(vaccOPQR, vaccSTUV);
    // Add the output zero point with saturation.
    vacc01234567 = wasm_i16x8_add_sat(vacc01234567, voutput_zero_point);
    vacc89ABCDEF = wasm_i16x8_add_sat(vacc89ABCDEF, voutput_zero_point);
    vaccGHIJKLMN = wasm_i16x8_add_sat(vaccGHIJKLMN, voutput_zero_point);
    vaccOPQRSTUV = wasm_i16x8_add_sat(vaccOPQRSTUV, voutput_zero_point);
    // Saturating narrow int16 -> uint8, then clamp to [output_min, output_max].
    v128_t vy0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
    v128_t vyGHIJKLMNOPQRSTUV = wasm_u8x16_narrow_i16x8(vaccGHIJKLMN, vaccOPQRSTUV);
    vy0123456789ABCDEF = wasm_u8x16_max(vy0123456789ABCDEF, voutput_min);
    vyGHIJKLMNOPQRSTUV = wasm_u8x16_max(vyGHIJKLMNOPQRSTUV, voutput_min);
    vy0123456789ABCDEF = wasm_u8x16_min(vy0123456789ABCDEF, voutput_max);
    vyGHIJKLMNOPQRSTUV = wasm_u8x16_min(vyGHIJKLMNOPQRSTUV, voutput_max);
    wasm_v128_store(output, vy0123456789ABCDEF);
    wasm_v128_store(output + 16, vyGHIJKLMNOPQRSTUV);
    output += 32;
  }
  // Secondary loop: 8 elements (2 vectors) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    v128_t vx_lo = wasm_v128_load(input);
    v128_t vx_hi = wasm_v128_load(input + 4);
    input += 8;
    vx_lo = wasm_f32x4_mul(vx_lo, vscale);
    vx_hi = wasm_f32x4_mul(vx_hi, vscale);
    vx_lo = wasm_f32x4_nearest(vx_lo);
    vx_hi = wasm_f32x4_nearest(vx_hi);
    v128_t vacc_lo = wasm_i32x4_trunc_sat_f32x4(vx_lo);
    v128_t vacc_hi = wasm_i32x4_trunc_sat_f32x4(vx_hi);
    v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
    vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
    // Duplicate the 8 results across both halves; only the low 8 bytes are stored.
    v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
    vy = wasm_u8x16_max(vy, voutput_min);
    vy = wasm_u8x16_min(vy, voutput_max);
    wasm_v128_store64_lane(output, vy, 0);
    output += 8;
  }
  // Tail: 1..7 remaining elements.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    v128_t vx_lo = wasm_v128_load(input);
    // When >= 4 elements remain (batch bit 16 set), load the upper half from
    // input + 4; otherwise reload from input. Either way the load may overlap
    // or read past the tail — permitted by XNN_OOB_READS; the extra lanes are
    // never stored.
    const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    v128_t vx_hi = wasm_v128_load(x_hi);
    vx_lo = wasm_f32x4_mul(vx_lo, vscale);
    vx_hi = wasm_f32x4_mul(vx_hi, vscale);
    vx_lo = wasm_f32x4_nearest(vx_lo);
    vx_hi = wasm_f32x4_nearest(vx_hi);
    v128_t vacc_lo = wasm_i32x4_trunc_sat_f32x4(vx_lo);
    v128_t vacc_hi = wasm_i32x4_trunc_sat_f32x4(vx_hi);
    v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
    vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
    v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
    vy = wasm_u8x16_max(vy, voutput_min);
    vy = wasm_u8x16_min(vy, voutput_max);
    // Store 4, then 2, then 1 bytes, shifting consumed lanes out of vy.
    if (batch & (4 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
      vy = wasm_u64x2_shr(vy, 32);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store16_lane(output, vy, 0);
      vy = wasm_u32x4_shr(vy, 16);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store8_lane(output, vy, 0);
    }
  }
}
| 5,859 | 36.324841 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-wasmsimd-cvt-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-cvt.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
// Converts a batch of f32 values to quantized u8 using WAsm SIMD128:
// y = clamp(narrow(round_to_nearest(x * scale)) + zero_point, min, max).
// Handles 8 elements per iteration plus a 1..7-element tail.
// XNN_OOB_READS: the tail path may read past the end of the input buffer.
void xnn_f32_qu8_vcvt_ukernel__wasmsimd_cvt_x8(
    size_t batch,
    const float* input,
    uint8_t* output,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Quantization parameters, broadcast across all lanes.
  const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_cvt.scale);
  const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_cvt.output_zero_point);
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd_cvt.output_min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_cvt.output_max);
  while (batch >= 8 * sizeof(float)) {
    // Scale and round two vectors of 4 floats each.
    v128_t vf0 = wasm_f32x4_nearest(wasm_f32x4_mul(wasm_v128_load(input), vscale));
    v128_t vf1 = wasm_f32x4_nearest(wasm_f32x4_mul(wasm_v128_load(input + 4), vscale));
    input += 8;
    // Saturating conversion chain: f32 -> i32 -> i16 (+zero point) -> u8.
    v128_t vhalfwords = wasm_i16x8_narrow_i32x4(
        wasm_i32x4_trunc_sat_f32x4(vf0), wasm_i32x4_trunc_sat_f32x4(vf1));
    vhalfwords = wasm_i16x8_add_sat(vhalfwords, voutput_zero_point);
    // Both halves carry the same 8 results; only the low 8 bytes get stored.
    v128_t vout = wasm_u8x16_narrow_i16x8(vhalfwords, vhalfwords);
    vout = wasm_u8x16_min(wasm_u8x16_max(vout, voutput_min), voutput_max);
    wasm_v128_store64_lane(output, vout, 0);
    output += 8;
    batch -= 8 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Upper half loads from input + 4 when >= 4 elements remain, otherwise it
    // re-reads from input; overlapping/past-the-end reads are allowed here
    // (XNN_OOB_READS) because the extra lanes are never stored.
    const float* tail_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
    v128_t vf0 = wasm_f32x4_nearest(wasm_f32x4_mul(wasm_v128_load(input), vscale));
    v128_t vf1 = wasm_f32x4_nearest(wasm_f32x4_mul(wasm_v128_load(tail_hi), vscale));
    v128_t vhalfwords = wasm_i16x8_narrow_i32x4(
        wasm_i32x4_trunc_sat_f32x4(vf0), wasm_i32x4_trunc_sat_f32x4(vf1));
    vhalfwords = wasm_i16x8_add_sat(vhalfwords, voutput_zero_point);
    v128_t vout = wasm_u8x16_narrow_i16x8(vhalfwords, vhalfwords);
    vout = wasm_u8x16_min(wasm_u8x16_max(vout, voutput_min), voutput_max);
    // Emit 4, 2, then 1 bytes, shifting consumed lanes out after each store.
    if (batch & (4 * sizeof(float))) {
      wasm_v128_store32_lane(output, vout, 0);
      vout = wasm_u64x2_shr(vout, 32);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store16_lane(output, vout, 0);
      vout = wasm_u32x4_shr(vout, 16);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store8_lane(output, vout, 0);
    }
  }
}
| 3,067 | 30.958333 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-wasmsimd-magic-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-magic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__wasmsimd_magic_x16(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_magic.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->wasmsimd_magic.magic_min);
const v128_t vmagic_bias_less_zero_point = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias_less_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_magic.output_max);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
vx0123 = wasm_f32x4_mul(vx0123, vscale);
vx4567 = wasm_f32x4_mul(vx4567, vscale);
vx89AB = wasm_f32x4_mul(vx89AB, vscale);
vxCDEF = wasm_f32x4_mul(vxCDEF, vscale);
vx0123 = wasm_f32x4_add(vx0123, vmagic_bias);
vx4567 = wasm_f32x4_add(vx4567, vmagic_bias);
vx89AB = wasm_f32x4_add(vx89AB, vmagic_bias);
vxCDEF = wasm_f32x4_add(vxCDEF, vmagic_bias);
v128_t vacc0123 = wasm_i32x4_max(vx0123, vmagic_min);
v128_t vacc4567 = wasm_i32x4_max(vx4567, vmagic_min);
v128_t vacc89AB = wasm_i32x4_max(vx89AB, vmagic_min);
v128_t vaccCDEF = wasm_i32x4_max(vxCDEF, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_zero_point);
const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
const v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t vy0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
vy0123456789ABCDEF = wasm_u8x16_min(vy0123456789ABCDEF, voutput_max);
wasm_v128_store(output, vy0123456789ABCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx_lo = wasm_v128_load(input);
v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
vy = wasm_u8x16_min(vy, voutput_max);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
v128_t vx_lo = wasm_v128_load(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
v128_t vx_hi = wasm_v128_load(x_hi);
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
vy = wasm_u8x16_min(vy, voutput_max);
if (batch & (4 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 4,862 | 35.022222 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-wasmsimd-magic-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-magic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__wasmsimd_magic_x24(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_magic.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->wasmsimd_magic.magic_min);
const v128_t vmagic_bias_less_zero_point = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias_less_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_magic.output_max);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
vx0123 = wasm_f32x4_mul(vx0123, vscale);
vx4567 = wasm_f32x4_mul(vx4567, vscale);
vx89AB = wasm_f32x4_mul(vx89AB, vscale);
vxCDEF = wasm_f32x4_mul(vxCDEF, vscale);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vscale);
vxKLMN = wasm_f32x4_mul(vxKLMN, vscale);
vx0123 = wasm_f32x4_add(vx0123, vmagic_bias);
vx4567 = wasm_f32x4_add(vx4567, vmagic_bias);
vx89AB = wasm_f32x4_add(vx89AB, vmagic_bias);
vxCDEF = wasm_f32x4_add(vxCDEF, vmagic_bias);
vxGHIJ = wasm_f32x4_add(vxGHIJ, vmagic_bias);
vxKLMN = wasm_f32x4_add(vxKLMN, vmagic_bias);
v128_t vacc0123 = wasm_i32x4_max(vx0123, vmagic_min);
v128_t vacc4567 = wasm_i32x4_max(vx4567, vmagic_min);
v128_t vacc89AB = wasm_i32x4_max(vx89AB, vmagic_min);
v128_t vaccCDEF = wasm_i32x4_max(vxCDEF, vmagic_min);
v128_t vaccGHIJ = wasm_i32x4_max(vxGHIJ, vmagic_min);
v128_t vaccKLMN = wasm_i32x4_max(vxKLMN, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_zero_point);
vaccGHIJ = wasm_i32x4_sub(vaccGHIJ, vmagic_bias_less_zero_point);
vaccKLMN = wasm_i32x4_sub(vaccKLMN, vmagic_bias_less_zero_point);
const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
const v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
const v128_t vaccGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
v128_t vy0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
v128_t vyGHIJKLMN = wasm_u8x16_narrow_i16x8(vaccGHIJKLMN, vaccGHIJKLMN);
vy0123456789ABCDEF = wasm_u8x16_min(vy0123456789ABCDEF, voutput_max);
vyGHIJKLMN = wasm_u8x16_min(vyGHIJKLMN, voutput_max);
wasm_v128_store(output, vy0123456789ABCDEF);
wasm_v128_store64_lane(output + 16, vyGHIJKLMN, 0);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx_lo = wasm_v128_load(input);
v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
vy = wasm_u8x16_min(vy, voutput_max);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
v128_t vx_lo = wasm_v128_load(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
v128_t vx_hi = wasm_v128_load(x_hi);
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
vy = wasm_u8x16_min(vy, voutput_max);
if (batch & (4 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 5,672 | 37.073826 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-wasmsimd-magic-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-magic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__wasmsimd_magic_x32(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_magic.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->wasmsimd_magic.magic_min);
const v128_t vmagic_bias_less_zero_point = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias_less_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_magic.output_max);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
v128_t vxKLMN = wasm_v128_load(input + 20);
v128_t vxOPQR = wasm_v128_load(input + 24);
v128_t vxSTUV = wasm_v128_load(input + 28);
input += 32;
vx0123 = wasm_f32x4_mul(vx0123, vscale);
vx4567 = wasm_f32x4_mul(vx4567, vscale);
vx89AB = wasm_f32x4_mul(vx89AB, vscale);
vxCDEF = wasm_f32x4_mul(vxCDEF, vscale);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vscale);
vxKLMN = wasm_f32x4_mul(vxKLMN, vscale);
vxOPQR = wasm_f32x4_mul(vxOPQR, vscale);
vxSTUV = wasm_f32x4_mul(vxSTUV, vscale);
vx0123 = wasm_f32x4_add(vx0123, vmagic_bias);
vx4567 = wasm_f32x4_add(vx4567, vmagic_bias);
vx89AB = wasm_f32x4_add(vx89AB, vmagic_bias);
vxCDEF = wasm_f32x4_add(vxCDEF, vmagic_bias);
vxGHIJ = wasm_f32x4_add(vxGHIJ, vmagic_bias);
vxKLMN = wasm_f32x4_add(vxKLMN, vmagic_bias);
vxOPQR = wasm_f32x4_add(vxOPQR, vmagic_bias);
vxSTUV = wasm_f32x4_add(vxSTUV, vmagic_bias);
v128_t vacc0123 = wasm_i32x4_max(vx0123, vmagic_min);
v128_t vacc4567 = wasm_i32x4_max(vx4567, vmagic_min);
v128_t vacc89AB = wasm_i32x4_max(vx89AB, vmagic_min);
v128_t vaccCDEF = wasm_i32x4_max(vxCDEF, vmagic_min);
v128_t vaccGHIJ = wasm_i32x4_max(vxGHIJ, vmagic_min);
v128_t vaccKLMN = wasm_i32x4_max(vxKLMN, vmagic_min);
v128_t vaccOPQR = wasm_i32x4_max(vxOPQR, vmagic_min);
v128_t vaccSTUV = wasm_i32x4_max(vxSTUV, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_zero_point);
vaccGHIJ = wasm_i32x4_sub(vaccGHIJ, vmagic_bias_less_zero_point);
vaccKLMN = wasm_i32x4_sub(vaccKLMN, vmagic_bias_less_zero_point);
vaccOPQR = wasm_i32x4_sub(vaccOPQR, vmagic_bias_less_zero_point);
vaccSTUV = wasm_i32x4_sub(vaccSTUV, vmagic_bias_less_zero_point);
const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
const v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
const v128_t vaccGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
const v128_t vaccOPQRSTUV = wasm_i16x8_narrow_i32x4(vaccOPQR, vaccSTUV);
v128_t vy0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
v128_t vyGHIJKLMNOPQRSTUV = wasm_u8x16_narrow_i16x8(vaccGHIJKLMN, vaccOPQRSTUV);
vy0123456789ABCDEF = wasm_u8x16_min(vy0123456789ABCDEF, voutput_max);
vyGHIJKLMNOPQRSTUV = wasm_u8x16_min(vyGHIJKLMNOPQRSTUV, voutput_max);
wasm_v128_store(output, vy0123456789ABCDEF);
wasm_v128_store(output + 16, vyGHIJKLMNOPQRSTUV);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx_lo = wasm_v128_load(input);
v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
vy = wasm_u8x16_min(vy, voutput_max);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
v128_t vx_lo = wasm_v128_load(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
v128_t vx_hi = wasm_v128_load(x_hi);
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
vy = wasm_u8x16_min(vy, voutput_max);
if (batch & (4 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 6,313 | 38.4625 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-wasmsimd-magic-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-magic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__wasmsimd_magic_x8(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_magic.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->wasmsimd_magic.magic_min);
const v128_t vmagic_bias_less_zero_point = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias_less_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_magic.output_max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx_lo = wasm_v128_load(input);
v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
vy = wasm_u8x16_min(vy, voutput_max);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
v128_t vx_lo = wasm_v128_load(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
v128_t vx_hi = wasm_v128_load(x_hi);
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
vy = wasm_u8x16_min(vy, voutput_max);
if (batch & (4 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,306 | 32.744898 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-x64-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_raddexpminusmax_ukernel__avx2_p5_x64_acc2(
size_t batch,
const float* input,
float* sum,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vi_max = _mm256_set1_ps(max);
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
// Load 64 (8x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
input += 64;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
// Accumulate computed exponents.
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc1 = _mm256_add_ps(vacc1, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc1 = _mm256_add_ps(vacc1, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc1 = _mm256_add_ps(vacc1, vf7);
}
// Add up all accumulators to vacc0
vacc0 = _mm256_add_ps(vacc0, vacc1);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Accumulate computed exponents.
vacc = _mm256_add_ps(vacc, vf);
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
}
// Reduce 8 batch in the SIMD register
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
_mm_store_ss(sum, vacc_lo);
_mm256_zeroupper();
}
| 13,445 | 43.82 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-x64-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddexpminusmax.h>
// Shifted-mask table for the remainder path: an 8-lane load starting at
// ((uintptr_t) &mask_table[7] - batch), with batch in bytes, yields
// batch/sizeof(float) leading all-ones (-1) lanes followed by zero lanes,
// which _mm256_maskload_ps uses to read only the valid trailing elements.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// Computes *sum = sum over all elements of exp(input[i] - max) using AVX2+FMA.
// exp() is evaluated with range reduction (x = n*log(2) + t, Cody-Waite split
// of log(2)) and a degree-5 polynomial on t. The main loop consumes 64 floats
// per iteration and spreads the additions over 4 independent accumulators.
//
// batch - number of input BYTES; must be a non-zero multiple of sizeof(float)
// input - pointer to the input floats
// sum   - receives the scalar sum of exponentials
// max   - maximum of the inputs; subtracting it keeps exp() arguments <= 0,
//         so no intermediate exponential can overflow
void xnn_f32_raddexpminusmax_ukernel__avx2_p5_x64_acc4(
    size_t batch,
    const float* input,
    float* sum,
    float max)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // Adding this constant to x*log2(e) rounds it to an integer held in the low
  // mantissa bits (round-to-nearest via float addition).
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  // The smallest x for which expf(x) is normalized.
  const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  // log(2) split into high and low parts for Cody-Waite range reduction.
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // Degree-5 minimax polynomial coefficients for exp(t) on [-log(2)/2, log(2)/2].
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);

  const __m256 vi_max = _mm256_set1_ps(max);

  // Four independent partial sums; additions below rotate across them so
  // consecutive vector adds do not form one serial dependency chain.
  __m256 vacc0 = _mm256_setzero_ps();
  __m256 vacc1 = _mm256_setzero_ps();
  __m256 vacc2 = _mm256_setzero_ps();
  __m256 vacc3 = _mm256_setzero_ps();
  for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
    // Load 64 (8x8) inputs at a time.
    const __m256 vi0 = _mm256_loadu_ps(input);
    const __m256 vi1 = _mm256_loadu_ps(input + 8);
    const __m256 vi2 = _mm256_loadu_ps(input + 16);
    const __m256 vi3 = _mm256_loadu_ps(input + 24);
    const __m256 vi4 = _mm256_loadu_ps(input + 32);
    const __m256 vi5 = _mm256_loadu_ps(input + 40);
    const __m256 vi6 = _mm256_loadu_ps(input + 48);
    const __m256 vi7 = _mm256_loadu_ps(input + 56);
    input += 64;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
    const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
    const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
    const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
    const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
    const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
    const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
    const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);

    // Compute reduced argument batch := round(x / log(2)).
    __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
    __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
    __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
    __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
    __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
    __m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
    __m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
    __m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    // (Shifting the integer held in the mantissa bits into the exponent field.)
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
    vn7 = _mm256_sub_ps(vn7, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);

    vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    // Horner evaluation: p = c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt0 = _mm256_mul_ps(vt0, vs0);
    vt1 = _mm256_mul_ps(vt1, vs1);
    vt2 = _mm256_mul_ps(vt2, vs2);
    vt3 = _mm256_mul_ps(vt3, vs3);
    vt4 = _mm256_mul_ps(vt4, vs4);
    vt5 = _mm256_mul_ps(vt5, vs5);
    vt6 = _mm256_mul_ps(vt6, vs6);
    vt7 = _mm256_mul_ps(vt7, vs7);

    __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
    __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
    __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
    __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
    __m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
    __m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
    __m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
    __m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
    vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
    vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
    vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
    vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
    vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
    vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
    vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);

    // Accumulate computed exponents, round-robin over the 4 accumulators.
    vacc0 = _mm256_add_ps(vacc0, vf0);
    vacc1 = _mm256_add_ps(vacc1, vf1);
    vacc2 = _mm256_add_ps(vacc2, vf2);
    vacc3 = _mm256_add_ps(vacc3, vf3);
    vacc0 = _mm256_add_ps(vacc0, vf4);
    vacc1 = _mm256_add_ps(vacc1, vf5);
    vacc2 = _mm256_add_ps(vacc2, vf6);
    vacc3 = _mm256_add_ps(vacc3, vf7);
  }
  // Add up all accumulators to vacc0
  vacc0 = _mm256_add_ps(vacc0, vacc1);
  vacc2 = _mm256_add_ps(vacc2, vacc3);
  vacc0 = _mm256_add_ps(vacc0, vacc2);

  __m256 vacc = vacc0;
  // Single-register loop for remaining full groups of 8 elements.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 inputs at a time.
    const __m256 vi = _mm256_loadu_ps(input);
    input += 8;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx = _mm256_sub_ps(vi, vi_max);

    // Compute reduced argument batch := round(x / log(2)).
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

    // Accumulate computed exponents.
    vacc = _mm256_add_ps(vacc, vf);
  }
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));

    // &mask_table[7] - batch (byte arithmetic) selects a window whose first
    // batch/sizeof(float) lanes are all-ones; see mask_table above.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));

    // Load up to 7 inputs at a time.
    const __m256 vi = _mm256_maskload_ps(input, vmask);

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx = _mm256_sub_ps(vi, vi_max);

    // Compute reduced argument batch := round(x / log(2)).
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

    // Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
  // Reduce 8 batch in the SIMD register
  // (high 128 + low 128, then pairwise within the 128-bit register).
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
  // Clear upper YMM halves to avoid AVX->SSE transition penalties in any
  // legacy-SSE code that runs after this kernel.
  _mm256_zeroupper();
}
| 13,599 | 43.736842 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddexpminusmax.h>
// Shifted-mask table for the remainder path: an 8-lane load starting at
// ((uintptr_t) &mask_table[7] - batch), with batch in bytes, yields
// batch/sizeof(float) leading all-ones lanes (followed by zeros) for
// _mm256_maskload_ps.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// Computes *sum = sum over all elements of exp(input[i] - max) using AVX2+FMA.
// Same algorithm as the acc4 variant, but the 64-float main loop feeds a
// single accumulator (vacc0): range reduction x = n*log(2) + t with a
// Cody-Waite split of log(2), then a degree-5 polynomial in t.
//
// batch - number of input BYTES; must be a non-zero multiple of sizeof(float)
// input - pointer to the input floats
// sum   - receives the scalar sum of exponentials
// max   - maximum of the inputs; subtracting it keeps exp() arguments <= 0
void xnn_f32_raddexpminusmax_ukernel__avx2_p5_x64(
    size_t batch,
    const float* input,
    float* sum,
    float max)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // Adding this constant to x*log2(e) rounds it to an integer held in the low
  // mantissa bits (round-to-nearest via float addition).
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  // The smallest x for which expf(x) is normalized.
  const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  // log(2) split into high and low parts for Cody-Waite range reduction.
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // Degree-5 minimax polynomial coefficients for exp(t) on [-log(2)/2, log(2)/2].
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);

  const __m256 vi_max = _mm256_set1_ps(max);

  // Single vector accumulator (x64 variant without extra accumulators).
  __m256 vacc0 = _mm256_setzero_ps();
  for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
    // Load 64 (8x8) inputs at a time.
    const __m256 vi0 = _mm256_loadu_ps(input);
    const __m256 vi1 = _mm256_loadu_ps(input + 8);
    const __m256 vi2 = _mm256_loadu_ps(input + 16);
    const __m256 vi3 = _mm256_loadu_ps(input + 24);
    const __m256 vi4 = _mm256_loadu_ps(input + 32);
    const __m256 vi5 = _mm256_loadu_ps(input + 40);
    const __m256 vi6 = _mm256_loadu_ps(input + 48);
    const __m256 vi7 = _mm256_loadu_ps(input + 56);
    input += 64;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
    const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
    const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
    const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
    const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
    const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
    const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
    const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);

    // Compute reduced argument batch := round(x / log(2)).
    __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
    __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
    __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
    __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
    __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
    __m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
    __m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
    __m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    // (Shifting the integer held in the mantissa bits into the exponent field.)
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
    vn7 = _mm256_sub_ps(vn7, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);

    vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    // Horner evaluation: p = c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt0 = _mm256_mul_ps(vt0, vs0);
    vt1 = _mm256_mul_ps(vt1, vs1);
    vt2 = _mm256_mul_ps(vt2, vs2);
    vt3 = _mm256_mul_ps(vt3, vs3);
    vt4 = _mm256_mul_ps(vt4, vs4);
    vt5 = _mm256_mul_ps(vt5, vs5);
    vt6 = _mm256_mul_ps(vt6, vs6);
    vt7 = _mm256_mul_ps(vt7, vs7);

    __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
    __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
    __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
    __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
    __m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
    __m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
    __m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
    __m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
    vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
    vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
    vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
    vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
    vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
    vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
    vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);

    // Accumulate computed exponents.
    vacc0 = _mm256_add_ps(vacc0, vf0);
    vacc0 = _mm256_add_ps(vacc0, vf1);
    vacc0 = _mm256_add_ps(vacc0, vf2);
    vacc0 = _mm256_add_ps(vacc0, vf3);
    vacc0 = _mm256_add_ps(vacc0, vf4);
    vacc0 = _mm256_add_ps(vacc0, vf5);
    vacc0 = _mm256_add_ps(vacc0, vf6);
    vacc0 = _mm256_add_ps(vacc0, vf7);
  }

  __m256 vacc = vacc0;
  // Single-register loop for remaining full groups of 8 elements.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 inputs at a time.
    const __m256 vi = _mm256_loadu_ps(input);
    input += 8;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx = _mm256_sub_ps(vi, vi_max);

    // Compute reduced argument batch := round(x / log(2)).
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

    // Accumulate computed exponents.
    vacc = _mm256_add_ps(vacc, vf);
  }
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));

    // &mask_table[7] - batch (byte arithmetic) selects a window whose first
    // batch/sizeof(float) lanes are all-ones; see mask_table above.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));

    // Load up to 7 inputs at a time.
    const __m256 vi = _mm256_maskload_ps(input, vmask);

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx = _mm256_sub_ps(vi, vi_max);

    // Compute reduced argument batch := round(x / log(2)).
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

    // Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
  // Reduce 8 batch in the SIMD register
  // (high 128 + low 128, then pairwise within the 128-bit register).
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
  // Clear upper YMM halves to avoid AVX->SSE transition penalties in any
  // legacy-SSE code that runs after this kernel.
  _mm256_zeroupper();
}
| 13,325 | 43.868687 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-x72-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddexpminusmax.h>
// Mask table for the remainder path: loading 8 lanes from
// (uintptr_t) &mask_table[7] - batch makes the first batch/sizeof(float)
// lanes all-ones (-1) and the remaining lanes zero, for use with
// _mm256_maskload_ps and for zeroing out unused lanes before accumulation.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// Computes *sum = sum over i of exp(input[i] - max), i.e. the denominator of a
// numerically-stable softmax, using AVX2+FMA. The main loop processes 72
// floats (9 YMM registers) per iteration and spreads the additions across 3
// independent accumulators to shorten the add dependency chain.
//
// batch  - number of input BYTES; must be a non-zero multiple of sizeof(float)
// input  - pointer to the input floats
// sum    - pointer to a single float that receives the reduced sum
// max    - maximum of the inputs (subtracted for numerical stability)
void xnn_f32_raddexpminusmax_ukernel__avx2_p5_x72_acc3(
    size_t batch,
    const float* input,
    float* sum,
    float max)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);
  // Rounding magic: adding this constant to x*log2(e) leaves round(x/log(2))
  // in the low mantissa bits, which the <<23 shift below turns into a
  // power-of-two scale factor.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  // The smallest x for which expf(x) is normalized.
  const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  // log(2) split into high/low parts for Cody-Waite range reduction.
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // Degree-5 polynomial coefficients approximating exp(t) on [-log(2)/2, log(2)/2].
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
  const __m256 vi_max = _mm256_set1_ps(max);
  // Three independent accumulators so consecutive adds in the unrolled loop
  // do not serialize on a single register.
  __m256 vacc0 = _mm256_setzero_ps();
  __m256 vacc1 = _mm256_setzero_ps();
  __m256 vacc2 = _mm256_setzero_ps();
  for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
    // Load 72 (9x8) inputs at a time.
    const __m256 vi0 = _mm256_loadu_ps(input);
    const __m256 vi1 = _mm256_loadu_ps(input + 8);
    const __m256 vi2 = _mm256_loadu_ps(input + 16);
    const __m256 vi3 = _mm256_loadu_ps(input + 24);
    const __m256 vi4 = _mm256_loadu_ps(input + 32);
    const __m256 vi5 = _mm256_loadu_ps(input + 40);
    const __m256 vi6 = _mm256_loadu_ps(input + 48);
    const __m256 vi7 = _mm256_loadu_ps(input + 56);
    const __m256 vi8 = _mm256_loadu_ps(input + 64);
    input += 72;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
    const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
    const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
    const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
    const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
    const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
    const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
    const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
    const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
    __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
    __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
    __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
    __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
    __m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
    __m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
    __m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
    __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
    const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
    vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
    __m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt0 = _mm256_mul_ps(vt0, vs0);
    vt1 = _mm256_mul_ps(vt1, vs1);
    vt2 = _mm256_mul_ps(vt2, vs2);
    vt3 = _mm256_mul_ps(vt3, vs3);
    vt4 = _mm256_mul_ps(vt4, vs4);
    vt5 = _mm256_mul_ps(vt5, vs5);
    vt6 = _mm256_mul_ps(vt6, vs6);
    vt7 = _mm256_mul_ps(vt7, vs7);
    vt8 = _mm256_mul_ps(vt8, vs8);
    __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
    __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
    __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
    __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
    __m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
    __m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
    __m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
    __m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
    __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
    vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
    vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
    vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
    vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
    vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
    vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
    vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
    vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    // Accumulate computed exponents, round-robin across the 3 accumulators.
    vacc0 = _mm256_add_ps(vacc0, vf0);
    vacc1 = _mm256_add_ps(vacc1, vf1);
    vacc2 = _mm256_add_ps(vacc2, vf2);
    vacc0 = _mm256_add_ps(vacc0, vf3);
    vacc1 = _mm256_add_ps(vacc1, vf4);
    vacc2 = _mm256_add_ps(vacc2, vf5);
    vacc0 = _mm256_add_ps(vacc0, vf6);
    vacc1 = _mm256_add_ps(vacc1, vf7);
    vacc2 = _mm256_add_ps(vacc2, vf8);
  }
  // Add up all accumulators to vacc0
  vacc0 = _mm256_add_ps(vacc0, vacc1);
  vacc0 = _mm256_add_ps(vacc0, vacc2);
  __m256 vacc = vacc0;
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 inputs at a time.
    const __m256 vi = _mm256_loadu_ps(input);
    input += 8;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx = _mm256_sub_ps(vi, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    // Accumulate computed exponents.
    vacc = _mm256_add_ps(vacc, vf);
  }
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Byte-offset into mask_table selects a mask with batch/sizeof(float)
    // leading all-ones lanes; see the comment on mask_table.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
    // Load up to 7 inputs at a time.
    const __m256 vi = _mm256_maskload_ps(input, vmask);
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx = _mm256_sub_ps(vi, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    // Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
  // Reduce 8 batch in the SIMD register
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
  // Clear the upper halves of the YMM registers before returning to callers
  // that may execute legacy-SSE code (avoids AVX-SSE transition penalties).
  _mm256_zeroupper();
}
| 14,311 | 44.148265 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddexpminusmax.h>
// Mask table for the remainder path: loading 8 lanes from
// (uintptr_t) &mask_table[7] - batch makes the first batch/sizeof(float)
// lanes all-ones (-1) and the remaining lanes zero, for use with
// _mm256_maskload_ps and for zeroing out unused lanes before accumulation.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// Computes *sum = sum over i of exp(input[i] - max), i.e. the denominator of a
// numerically-stable softmax, using AVX2+FMA. The main loop processes 72
// floats (9 YMM registers) per iteration; unlike the _acc3 variant, all
// partial sums go into a single accumulator.
//
// batch  - number of input BYTES; must be a non-zero multiple of sizeof(float)
// input  - pointer to the input floats
// sum    - pointer to a single float that receives the reduced sum
// max    - maximum of the inputs (subtracted for numerical stability)
void xnn_f32_raddexpminusmax_ukernel__avx2_p5_x72(
    size_t batch,
    const float* input,
    float* sum,
    float max)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);
  // Rounding magic: adding this constant to x*log2(e) leaves round(x/log(2))
  // in the low mantissa bits, which the <<23 shift below turns into a
  // power-of-two scale factor.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  // The smallest x for which expf(x) is normalized.
  const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  // log(2) split into high/low parts for Cody-Waite range reduction.
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // Degree-5 polynomial coefficients approximating exp(t) on [-log(2)/2, log(2)/2].
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
  const __m256 vi_max = _mm256_set1_ps(max);
  __m256 vacc0 = _mm256_setzero_ps();
  for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
    // Load 72 (9x8) inputs at a time.
    const __m256 vi0 = _mm256_loadu_ps(input);
    const __m256 vi1 = _mm256_loadu_ps(input + 8);
    const __m256 vi2 = _mm256_loadu_ps(input + 16);
    const __m256 vi3 = _mm256_loadu_ps(input + 24);
    const __m256 vi4 = _mm256_loadu_ps(input + 32);
    const __m256 vi5 = _mm256_loadu_ps(input + 40);
    const __m256 vi6 = _mm256_loadu_ps(input + 48);
    const __m256 vi7 = _mm256_loadu_ps(input + 56);
    const __m256 vi8 = _mm256_loadu_ps(input + 64);
    input += 72;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
    const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
    const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
    const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
    const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
    const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
    const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
    const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
    const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
    __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
    __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
    __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
    __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
    __m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
    __m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
    __m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
    __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
    const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
    vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
    __m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt0 = _mm256_mul_ps(vt0, vs0);
    vt1 = _mm256_mul_ps(vt1, vs1);
    vt2 = _mm256_mul_ps(vt2, vs2);
    vt3 = _mm256_mul_ps(vt3, vs3);
    vt4 = _mm256_mul_ps(vt4, vs4);
    vt5 = _mm256_mul_ps(vt5, vs5);
    vt6 = _mm256_mul_ps(vt6, vs6);
    vt7 = _mm256_mul_ps(vt7, vs7);
    vt8 = _mm256_mul_ps(vt8, vs8);
    __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
    __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
    __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
    __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
    __m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
    __m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
    __m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
    __m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
    __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
    vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
    vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
    vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
    vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
    vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
    vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
    vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
    vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    // Accumulate computed exponents.
    vacc0 = _mm256_add_ps(vacc0, vf0);
    vacc0 = _mm256_add_ps(vacc0, vf1);
    vacc0 = _mm256_add_ps(vacc0, vf2);
    vacc0 = _mm256_add_ps(vacc0, vf3);
    vacc0 = _mm256_add_ps(vacc0, vf4);
    vacc0 = _mm256_add_ps(vacc0, vf5);
    vacc0 = _mm256_add_ps(vacc0, vf6);
    vacc0 = _mm256_add_ps(vacc0, vf7);
    vacc0 = _mm256_add_ps(vacc0, vf8);
  }
  __m256 vacc = vacc0;
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 inputs at a time.
    const __m256 vi = _mm256_loadu_ps(input);
    input += 8;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx = _mm256_sub_ps(vi, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    // Accumulate computed exponents.
    vacc = _mm256_add_ps(vacc, vf);
  }
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Byte-offset into mask_table selects a mask with batch/sizeof(float)
    // leading all-ones lanes; see the comment on mask_table.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
    // Load up to 7 inputs at a time.
    const __m256 vi = _mm256_maskload_ps(input, vmask);
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx = _mm256_sub_ps(vi, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    // Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
  // Reduce 8 batch in the SIMD register
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
  // Clear the upper halves of the YMM registers before returning to callers
  // that may execute legacy-SSE code (avoids AVX-SSE transition penalties).
  _mm256_zeroupper();
}
| 14,114 | 44.240385 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-x80-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_raddexpminusmax_ukernel__avx2_p5_x80_acc2(
size_t batch,
const float* input,
float* sum,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vi_max = _mm256_set1_ps(max);
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
// Load 80 (10x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
const __m256 vi8 = _mm256_loadu_ps(input + 64);
const __m256 vi9 = _mm256_loadu_ps(input + 72);
input += 80;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
// Accumulate computed exponents.
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc1 = _mm256_add_ps(vacc1, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc1 = _mm256_add_ps(vacc1, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc1 = _mm256_add_ps(vacc1, vf7);
vacc0 = _mm256_add_ps(vacc0, vf8);
vacc1 = _mm256_add_ps(vacc1, vf9);
}
// Add up all accumulators to vacc0
vacc0 = _mm256_add_ps(vacc0, vacc1);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Accumulate computed exponents.
vacc = _mm256_add_ps(vacc, vf);
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
}
// Reduce 8 batch in the SIMD register
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
_mm_store_ss(sum, vacc_lo);
_mm256_zeroupper();
}
| 15,024 | 44.530303 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-x80-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddexpminusmax.h>
// Sliding mask window for the 1-7 float tail: loading 8 int32 lanes starting
// at (uintptr_t) &mask_table[7] - batch (batch in bytes) yields all-ones in
// exactly the leading batch/sizeof(float) lanes and zeros in the rest.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// Computes *sum = sum_i exp(input[i] - max) over `batch` bytes of float input.
// `batch` is a byte count and must be a nonzero multiple of sizeof(float).
// AVX2/FMA implementation: the main loop consumes 80 floats (10 vectors of 8)
// per iteration and rotates the additions across 5 independent accumulators to
// shorten the floating-point add dependency chain; remaining elements are
// processed 8 at a time, then a masked load handles the final 1-7 floats.
// exp() is evaluated via Cody-Waite range reduction followed by a degree-5
// polynomial approximation on [-log(2)/2, log(2)/2].
void xnn_f32_raddexpminusmax_ukernel__avx2_p5_x80_acc5(
size_t batch,
const float* input,
float* sum,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
// Adding the magic bias to x*log2(e) places round(x/log(2)) in the low
// mantissa bits (valid because x <= 0 after the max subtraction below).
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
// -log(2) split into high/low parts for Cody-Waite range reduction.
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
// Degree-5 polynomial coefficients for exp(t) on [-log(2)/2, log(2)/2].
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vi_max = _mm256_set1_ps(max);
// Five independent partial sums; reduced to one after the main loop.
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
__m256 vacc3 = _mm256_setzero_ps();
__m256 vacc4 = _mm256_setzero_ps();
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
// Load 80 (10x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
const __m256 vi8 = _mm256_loadu_ps(input + 64);
const __m256 vi9 = _mm256_loadu_ps(input + 72);
input += 80;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
// The shift moves the integer held in the low mantissa bits of vn into the
// exponent field, producing 2**n directly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
// Horner evaluation: p = c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
// Reconstruct the final f value:
//   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
//     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
//     = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
// Accumulate computed exponents.
// The 10 vectors rotate through the 5 accumulators (0-4, then 0-4 again)
// so consecutive adds do not depend on each other.
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc2 = _mm256_add_ps(vacc2, vf2);
vacc3 = _mm256_add_ps(vacc3, vf3);
vacc4 = _mm256_add_ps(vacc4, vf4);
vacc0 = _mm256_add_ps(vacc0, vf5);
vacc1 = _mm256_add_ps(vacc1, vf6);
vacc2 = _mm256_add_ps(vacc2, vf7);
vacc3 = _mm256_add_ps(vacc3, vf8);
vacc4 = _mm256_add_ps(vacc4, vf9);
}
// Add up all accumulators to vacc0
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc2 = _mm256_add_ps(vacc2, vacc3);
vacc0 = _mm256_add_ps(vacc0, vacc2);
vacc0 = _mm256_add_ps(vacc0, vacc4);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
//   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
//     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
//     = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Accumulate computed exponents.
vacc = _mm256_add_ps(vacc, vf);
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
// Sliding window into mask_table: the first batch/sizeof(float) lanes of
// vmask are all-ones, the remaining lanes are zero.
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
//   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
//     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
//     = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
}
// Reduce 8 batch in the SIMD register
vacc_lo holds the horizontal sum after three pairwise reduction steps:
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
_mm_store_ss(sum, vacc_lo);
// Clear the upper YMM state to avoid AVX-SSE transition penalties in callers.
_mm256_zeroupper();
}
| 15,255 | 44.404762 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_raddexpminusmax_ukernel__avx2_p5_x80(
size_t batch,
const float* input,
float* sum,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vi_max = _mm256_set1_ps(max);
__m256 vacc0 = _mm256_setzero_ps();
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
// Load 80 (10x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
const __m256 vi8 = _mm256_loadu_ps(input + 64);
const __m256 vi9 = _mm256_loadu_ps(input + 72);
input += 80;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
// Accumulate computed exponents.
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc0 = _mm256_add_ps(vacc0, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc0 = _mm256_add_ps(vacc0, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc0 = _mm256_add_ps(vacc0, vf7);
vacc0 = _mm256_add_ps(vacc0, vf8);
vacc0 = _mm256_add_ps(vacc0, vf9);
}
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Accumulate computed exponents.
vacc = _mm256_add_ps(vacc, vf);
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
}
// Reduce 8 batch in the SIMD register
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
_mm_store_ss(sum, vacc_lo);
_mm256_zeroupper();
}
| 14,904 | 44.58104 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-x96-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddexpminusmax.h>
// Prefix-mask table for the 1..7-element tail: the first 7 entries are
// all-ones (lane enabled), the last 7 are zero (lane disabled). Loading
// 8 consecutive entries starting k elements before &mask_table[7] yields
// a mask with exactly k active leading lanes for _mm256_maskload_ps.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// Computes *sum = sum over the batch of expf(input[i] - max), where
// batch is given in BYTES (must be a non-zero multiple of sizeof(float)).
// Main loop processes 96 floats (12 AVX2 registers) per iteration into
// 2 independent accumulators (reduces add-latency chains); remainders are
// handled 8 at a time, then up to 7 via a masked load.
// exp() uses the usual decomposition exp(x) = 2**n * exp(t) with a
// degree-5 polynomial for exp(t) on [-log(2)/2, log(2)/2].
void xnn_f32_raddexpminusmax_ukernel__avx2_p5_x96_acc2(
    size_t batch,
    const float* input,
    float* sum,
    float max)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);
  // Magic bias: adding it to x*log2(e) puts round(x*log2(e)) in the low
  // mantissa bits, implementing round-to-nearest without a cvt instruction.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  // The smallest x for which expf(x) is normalized.
  const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  // log(2) split into high/low parts for Cody-Waite range reduction.
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // Degree-5 minimax polynomial coefficients for exp(t), t in [-log(2)/2, log(2)/2].
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
  const __m256 vi_max = _mm256_set1_ps(max);
  // Two independent partial-sum accumulators (summed together after the main loop).
  __m256 vacc0 = _mm256_setzero_ps();
  __m256 vacc1 = _mm256_setzero_ps();
  for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
    // Load 96 (12x8) inputs at a time.
    const __m256 vi0 = _mm256_loadu_ps(input);
    const __m256 vi1 = _mm256_loadu_ps(input + 8);
    const __m256 vi2 = _mm256_loadu_ps(input + 16);
    const __m256 vi3 = _mm256_loadu_ps(input + 24);
    const __m256 vi4 = _mm256_loadu_ps(input + 32);
    const __m256 vi5 = _mm256_loadu_ps(input + 40);
    const __m256 vi6 = _mm256_loadu_ps(input + 48);
    const __m256 vi7 = _mm256_loadu_ps(input + 56);
    const __m256 vi8 = _mm256_loadu_ps(input + 64);
    const __m256 vi9 = _mm256_loadu_ps(input + 72);
    const __m256 vi10 = _mm256_loadu_ps(input + 80);
    const __m256 vi11 = _mm256_loadu_ps(input + 88);
    input += 96;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
    const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
    const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
    const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
    const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
    const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
    const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
    const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
    const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
    const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
    const __m256 vx10 = _mm256_sub_ps(vi10, vi_max);
    const __m256 vx11 = _mm256_sub_ps(vi11, vi_max);
    // Compute reduced argument n := round(x / log(2)), kept biased in the
    // low mantissa bits of vn by the magic-bias trick.
    __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
    __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
    __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
    __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
    __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
    __m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
    __m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
    __m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
    __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
    __m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
    __m256 vn10 = _mm256_fmadd_ps(vx10, vlog2e, vmagic_bias);
    __m256 vn11 = _mm256_fmadd_ps(vx11, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly.
    // (Shifting the biased n into the exponent field builds 2**n directly.)
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
    const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
    const __m256 vs10 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn10), 23));
    const __m256 vs11 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn11), 23));
    // Subtract the large number back to get final n := round(x / log(2)).
    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
    vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    vn9 = _mm256_sub_ps(vn9, vmagic_bias);
    vn10 = _mm256_sub_ps(vn10, vmagic_bias);
    vn11 = _mm256_sub_ps(vn11, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
    __m256 vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_hi, vx10);
    __m256 vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_hi, vx11);
    vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
    vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_lo, vt10);
    vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_lo, vt11);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // via Horner's scheme.
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
    __m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
    __m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
    __m256 vp10 = _mm256_fmadd_ps(vc5, vt10, vc4);
    __m256 vp11 = _mm256_fmadd_ps(vc5, vt11, vc4);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
    vp10 = _mm256_fmadd_ps(vp10, vt10, vc3);
    vp11 = _mm256_fmadd_ps(vp11, vt11, vc3);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
    vp10 = _mm256_fmadd_ps(vp10, vt10, vc2);
    vp11 = _mm256_fmadd_ps(vp11, vt11, vc2);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
    vp10 = _mm256_fmadd_ps(vp10, vt10, vc1);
    vp11 = _mm256_fmadd_ps(vp11, vt11, vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt0 = _mm256_mul_ps(vt0, vs0);
    vt1 = _mm256_mul_ps(vt1, vs1);
    vt2 = _mm256_mul_ps(vt2, vs2);
    vt3 = _mm256_mul_ps(vt3, vs3);
    vt4 = _mm256_mul_ps(vt4, vs4);
    vt5 = _mm256_mul_ps(vt5, vs5);
    vt6 = _mm256_mul_ps(vt6, vs6);
    vt7 = _mm256_mul_ps(vt7, vs7);
    vt8 = _mm256_mul_ps(vt8, vs8);
    vt9 = _mm256_mul_ps(vt9, vs9);
    vt10 = _mm256_mul_ps(vt10, vs10);
    vt11 = _mm256_mul_ps(vt11, vs11);
    __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
    __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
    __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
    __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
    __m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
    __m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
    __m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
    __m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
    __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
    __m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
    __m256 vf10 = _mm256_fmadd_ps(vt10, vp10, vs10);
    __m256 vf11 = _mm256_fmadd_ps(vt11, vp11, vs11);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
    vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
    vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
    vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
    vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
    vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
    vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
    vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
    vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
    vf10 = _mm256_andnot_ps(_mm256_cmp_ps(vx10, vdenorm_cutoff, _CMP_LT_OS), vf10);
    vf11 = _mm256_andnot_ps(_mm256_cmp_ps(vx11, vdenorm_cutoff, _CMP_LT_OS), vf11);
    // Accumulate computed exponents, alternating between the two accumulators.
    vacc0 = _mm256_add_ps(vacc0, vf0);
    vacc1 = _mm256_add_ps(vacc1, vf1);
    vacc0 = _mm256_add_ps(vacc0, vf2);
    vacc1 = _mm256_add_ps(vacc1, vf3);
    vacc0 = _mm256_add_ps(vacc0, vf4);
    vacc1 = _mm256_add_ps(vacc1, vf5);
    vacc0 = _mm256_add_ps(vacc0, vf6);
    vacc1 = _mm256_add_ps(vacc1, vf7);
    vacc0 = _mm256_add_ps(vacc0, vf8);
    vacc1 = _mm256_add_ps(vacc1, vf9);
    vacc0 = _mm256_add_ps(vacc0, vf10);
    vacc1 = _mm256_add_ps(vacc1, vf11);
  }
  // Add up all accumulators to vacc0
  vacc0 = _mm256_add_ps(vacc0, vacc1);
  __m256 vacc = vacc0;
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 inputs at a time.
    const __m256 vi = _mm256_loadu_ps(input);
    input += 8;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx = _mm256_sub_ps(vi, vi_max);
    // Compute reduced argument n := round(x / log(2)).
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    // Subtract the large number back to get final n := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    // Accumulate computed exponents.
    vacc = _mm256_add_ps(vacc, vf);
  }
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Pointer arithmetic is in bytes: batch bytes before &mask_table[7]
    // selects a mask with batch/sizeof(float) active leading lanes.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
    // Load up to 7 inputs at a time.
    const __m256 vi = _mm256_maskload_ps(input, vmask);
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m256 vx = _mm256_sub_ps(vi, vi_max);
    // Compute reduced argument n := round(x / log(2)).
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    // Subtract the large number back to get final n := round(x / log(2)).
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    // Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
  // Reduce the 8 lanes of the SIMD register to a single scalar sum.
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
  // Clear the upper YMM halves to avoid AVX-SSE transition penalties in callers.
  _mm256_zeroupper();
}
| 16,676 | 45.325 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-x96-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_raddexpminusmax_ukernel__avx2_p5_x96_acc3(
size_t batch,
const float* input,
float* sum,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vi_max = _mm256_set1_ps(max);
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
// Load 96 (12x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
const __m256 vi8 = _mm256_loadu_ps(input + 64);
const __m256 vi9 = _mm256_loadu_ps(input + 72);
const __m256 vi10 = _mm256_loadu_ps(input + 80);
const __m256 vi11 = _mm256_loadu_ps(input + 88);
input += 96;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
const __m256 vx10 = _mm256_sub_ps(vi10, vi_max);
const __m256 vx11 = _mm256_sub_ps(vi11, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
__m256 vn10 = _mm256_fmadd_ps(vx10, vlog2e, vmagic_bias);
__m256 vn11 = _mm256_fmadd_ps(vx11, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
const __m256 vs10 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn10), 23));
const __m256 vs11 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn11), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
vn10 = _mm256_sub_ps(vn10, vmagic_bias);
vn11 = _mm256_sub_ps(vn11, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m256 vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_hi, vx10);
__m256 vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_hi, vx11);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_lo, vt10);
vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_lo, vt11);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
__m256 vp10 = _mm256_fmadd_ps(vc5, vt10, vc4);
__m256 vp11 = _mm256_fmadd_ps(vc5, vt11, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc3);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc2);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc1);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
vt10 = _mm256_mul_ps(vt10, vs10);
vt11 = _mm256_mul_ps(vt11, vs11);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
__m256 vf10 = _mm256_fmadd_ps(vt10, vp10, vs10);
__m256 vf11 = _mm256_fmadd_ps(vt11, vp11, vs11);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
vf10 = _mm256_andnot_ps(_mm256_cmp_ps(vx10, vdenorm_cutoff, _CMP_LT_OS), vf10);
vf11 = _mm256_andnot_ps(_mm256_cmp_ps(vx11, vdenorm_cutoff, _CMP_LT_OS), vf11);
// Accumulate computed exponents.
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc2 = _mm256_add_ps(vacc2, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
vacc1 = _mm256_add_ps(vacc1, vf4);
vacc2 = _mm256_add_ps(vacc2, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc1 = _mm256_add_ps(vacc1, vf7);
vacc2 = _mm256_add_ps(vacc2, vf8);
vacc0 = _mm256_add_ps(vacc0, vf9);
vacc1 = _mm256_add_ps(vacc1, vf10);
vacc2 = _mm256_add_ps(vacc2, vf11);
}
// Add up all accumulators to vacc0
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc0 = _mm256_add_ps(vacc0, vacc2);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Accumulate computed exponents.
vacc = _mm256_add_ps(vacc, vf);
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
}
// Reduce 8 batch in the SIMD register
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
_mm_store_ss(sum, vacc_lo);
_mm256_zeroupper();
}
| 16,753 | 45.281768 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-x96-acc6.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddexpminusmax.h>
// Mask table for AVX2 masked loads of the 1..7-float remainder: the first 7
// entries are all-ones (lane enabled), the last 7 are zeros (lane disabled).
// Loading 8 consecutive entries ending just before &mask_table[7] + k lanes
// back (i.e. at (uintptr_t) &mask_table[7] - batch bytes) yields a mask with
// exactly batch/sizeof(float) enabled lanes.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// Computes *sum = Σ exp(input[i] - max) over batch/sizeof(float) inputs,
// using AVX2+FMA. exp() is approximated with a degree-5 polynomial after
// Cody-Waite range reduction; the main loop consumes 96 floats (12x8) per
// iteration and spreads the additions over 6 independent accumulators
// (improves instruction-level parallelism and reduces rounding error).
//
// batch - number of input BYTES; must be a non-zero multiple of sizeof(float)
// input - pointer to the input floats
// sum   - pointer to a single float receiving the reduction result
// max   - maximum of the inputs (subtracted first for numerical stability,
//         so every exponent argument is <= 0)
void xnn_f32_raddexpminusmax_ukernel__avx2_p5_x96_acc6(
size_t batch,
const float* input,
float* sum,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
// Magic bias: adding it rounds x*log2(e) to the nearest integer in the
// low mantissa bits (valid because all arguments are bounded).
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
// -log(2) split into high/low parts for Cody-Waite range reduction.
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
// Degree-5 minimax polynomial coefficients for exp(t) on [-log(2)/2, log(2)/2].
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vi_max = _mm256_set1_ps(max);
// Six parallel accumulators; merged after the main loop.
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
__m256 vacc3 = _mm256_setzero_ps();
__m256 vacc4 = _mm256_setzero_ps();
__m256 vacc5 = _mm256_setzero_ps();
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
// Load 96 (12x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
const __m256 vi8 = _mm256_loadu_ps(input + 64);
const __m256 vi9 = _mm256_loadu_ps(input + 72);
const __m256 vi10 = _mm256_loadu_ps(input + 80);
const __m256 vi11 = _mm256_loadu_ps(input + 88);
input += 96;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
const __m256 vx10 = _mm256_sub_ps(vi10, vi_max);
const __m256 vx11 = _mm256_sub_ps(vi11, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
__m256 vn10 = _mm256_fmadd_ps(vx10, vlog2e, vmagic_bias);
__m256 vn11 = _mm256_fmadd_ps(vx11, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
const __m256 vs10 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn10), 23));
const __m256 vs11 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn11), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
vn10 = _mm256_sub_ps(vn10, vmagic_bias);
vn11 = _mm256_sub_ps(vn11, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m256 vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_hi, vx10);
__m256 vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_hi, vx11);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_lo, vt10);
vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_lo, vt11);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
__m256 vp10 = _mm256_fmadd_ps(vc5, vt10, vc4);
__m256 vp11 = _mm256_fmadd_ps(vc5, vt11, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc3);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc2);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc1);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc1);
// Reconstruct the final f value:
//   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
//     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
//     = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
vt10 = _mm256_mul_ps(vt10, vs10);
vt11 = _mm256_mul_ps(vt11, vs11);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
__m256 vf10 = _mm256_fmadd_ps(vt10, vp10, vs10);
__m256 vf11 = _mm256_fmadd_ps(vt11, vp11, vs11);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
vf10 = _mm256_andnot_ps(_mm256_cmp_ps(vx10, vdenorm_cutoff, _CMP_LT_OS), vf10);
vf11 = _mm256_andnot_ps(_mm256_cmp_ps(vx11, vdenorm_cutoff, _CMP_LT_OS), vf11);
// Accumulate computed exponents.
// The 12 partial results rotate across the 6 accumulators.
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc2 = _mm256_add_ps(vacc2, vf2);
vacc3 = _mm256_add_ps(vacc3, vf3);
vacc4 = _mm256_add_ps(vacc4, vf4);
vacc5 = _mm256_add_ps(vacc5, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc1 = _mm256_add_ps(vacc1, vf7);
vacc2 = _mm256_add_ps(vacc2, vf8);
vacc3 = _mm256_add_ps(vacc3, vf9);
vacc4 = _mm256_add_ps(vacc4, vf10);
vacc5 = _mm256_add_ps(vacc5, vf11);
}
// Add up all accumulators to vacc0
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc2 = _mm256_add_ps(vacc2, vacc3);
vacc4 = _mm256_add_ps(vacc4, vacc5);
vacc0 = _mm256_add_ps(vacc0, vacc2);
vacc0 = _mm256_add_ps(vacc0, vacc4);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
//   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
//     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
//     = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Accumulate computed exponents.
vacc = _mm256_add_ps(vacc, vf);
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
// Build a load mask with exactly batch/sizeof(float) enabled lanes.
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
//   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
//     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
//     = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
}
// Reduce 8 batch in the SIMD register
// Horizontal sum: 8 lanes -> 4 -> 2 -> 1, then store the scalar result.
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
_mm_store_ss(sum, vacc_lo);
_mm256_zeroupper();
}
| 16,984 | 45.154891 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddexpminusmax.h>
// Mask table for AVX2 masked loads of the 1..7-float remainder: the first 7
// entries are all-ones (lane enabled), the last 7 are zeros (lane disabled).
// Loading 8 consecutive entries starting (uintptr_t) &mask_table[7] - batch
// bytes into the table yields a mask with exactly batch/sizeof(float)
// enabled lanes.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_raddexpminusmax_ukernel__avx2_p5_x96(
size_t batch,
const float* input,
float* sum,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vi_max = _mm256_set1_ps(max);
__m256 vacc0 = _mm256_setzero_ps();
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
// Load 96 (12x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
const __m256 vi8 = _mm256_loadu_ps(input + 64);
const __m256 vi9 = _mm256_loadu_ps(input + 72);
const __m256 vi10 = _mm256_loadu_ps(input + 80);
const __m256 vi11 = _mm256_loadu_ps(input + 88);
input += 96;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
const __m256 vx10 = _mm256_sub_ps(vi10, vi_max);
const __m256 vx11 = _mm256_sub_ps(vi11, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
__m256 vn10 = _mm256_fmadd_ps(vx10, vlog2e, vmagic_bias);
__m256 vn11 = _mm256_fmadd_ps(vx11, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
const __m256 vs10 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn10), 23));
const __m256 vs11 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn11), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
vn10 = _mm256_sub_ps(vn10, vmagic_bias);
vn11 = _mm256_sub_ps(vn11, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m256 vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_hi, vx10);
__m256 vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_hi, vx11);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_lo, vt10);
vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_lo, vt11);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
__m256 vp10 = _mm256_fmadd_ps(vc5, vt10, vc4);
__m256 vp11 = _mm256_fmadd_ps(vc5, vt11, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc3);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc2);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc1);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
vt10 = _mm256_mul_ps(vt10, vs10);
vt11 = _mm256_mul_ps(vt11, vs11);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
__m256 vf10 = _mm256_fmadd_ps(vt10, vp10, vs10);
__m256 vf11 = _mm256_fmadd_ps(vt11, vp11, vs11);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
vf10 = _mm256_andnot_ps(_mm256_cmp_ps(vx10, vdenorm_cutoff, _CMP_LT_OS), vf10);
vf11 = _mm256_andnot_ps(_mm256_cmp_ps(vx11, vdenorm_cutoff, _CMP_LT_OS), vf11);
// Accumulate computed exponents.
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc0 = _mm256_add_ps(vacc0, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc0 = _mm256_add_ps(vacc0, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc0 = _mm256_add_ps(vacc0, vf7);
vacc0 = _mm256_add_ps(vacc0, vf8);
vacc0 = _mm256_add_ps(vacc0, vf9);
vacc0 = _mm256_add_ps(vacc0, vf10);
vacc0 = _mm256_add_ps(vacc0, vf11);
}
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Accumulate computed exponents.
vacc = _mm256_add_ps(vacc, vf);
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
}
// Reduce 8 batch in the SIMD register
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
_mm_store_ss(sum, vacc_lo);
_mm256_zeroupper();
}
| 16,556 | 45.378151 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-x128-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddexpminusmax.h>
// Computes sum = Σ exp(input[i] - max) over batch/sizeof(float) elements
// using AVX512F, with two independent partial-sum accumulators in the
// 128-element main loop to shorten the add dependency chain.
void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x128_acc2(
    size_t batch,
    const float* input,
    float* sum,
    float max)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // log2(e), -log(2) split into hi/lo parts for Cody-Waite reduction, and
  // degree-5 minimax polynomial coefficients for exp(t) on [-log(2)/2, log(2)/2].
  // The hex-float values determine the kernel's bitwise results; do not alter.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
  const __m512 vi_max = _mm512_set1_ps(max);

  __m512 vacc0 = _mm512_setzero_ps();
  __m512 vacc1 = _mm512_setzero_ps();
  while (batch >= 128 * sizeof(float)) {
    // Main loop: 128 floats (8 vectors of 16) per pass. Vector k is folded
    // into accumulator k % 2, matching the add order of the unrolled form.
    for (size_t k = 0; k < 8; k++) {
      // x := i - max, hence x <= 0 and exp(x) cannot overflow.
      const __m512 vx = _mm512_sub_ps(_mm512_loadu_ps(input + 16 * k), vi_max);
      // n := round(x / log(2)).
      const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
      // t := x - n * log(2), via two-constant Cody-Waite reduction.
      __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
      vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
      // Horner evaluation of the degree-5 polynomial p(t) ~ exp(t).
      __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
      vp = _mm512_fmadd_ps(vp, vt, vc3);
      vp = _mm512_fmadd_ps(vp, vt, vc2);
      vp = _mm512_fmadd_ps(vp, vt, vc1);
      vp = _mm512_fmadd_ps(vp, vt, vc0);
      // f := 2**n * p(t), reconstructed with SCALEF.
      const __m512 vf = _mm512_scalef_ps(vp, vn);
      if (k % 2 == 0) {
        vacc0 = _mm512_add_ps(vacc0, vf);
      } else {
        vacc1 = _mm512_add_ps(vacc1, vf);
      }
    }
    input += 128;
    batch -= 128 * sizeof(float);
  }
  // Collapse the two partial sums into one accumulator.
  vacc0 = _mm512_add_ps(vacc0, vacc1);
  __m512 vacc = vacc0;
  while (batch >= 16 * sizeof(float)) {
    // Tail loop: one full vector of 16 floats at a time.
    const __m512 vx = _mm512_sub_ps(_mm512_loadu_ps(input), vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    vacc = _mm512_add_ps(vacc, _mm512_scalef_ps(vp, vn));
    input += 16;
    batch -= 16 * sizeof(float);
  }
  if (batch != 0) {
    // Remainder of 1-15 floats: a lane mask keeps out-of-range lanes from
    // being loaded or accumulated.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    const __m512 vx = _mm512_sub_ps(_mm512_maskz_loadu_ps(vmask, input), vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, _mm512_scalef_ps(vp, vn));
  }
  // Horizontal reduction of the 16 lanes to the scalar result.
  *sum = _mm512_reduce_add_ps(vacc);
}
| 9,517 | 39.502128 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-x128-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddexpminusmax.h>
// Computes sum = Σ exp(input[i] - max) over batch/sizeof(float) elements
// using AVX512F, with four independent partial-sum accumulators in the
// 128-element main loop to shorten the add dependency chain.
void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x128_acc4(
    size_t batch,
    const float* input,
    float* sum,
    float max)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // log2(e), -log(2) split into hi/lo parts for Cody-Waite reduction, and
  // degree-5 minimax polynomial coefficients for exp(t) on [-log(2)/2, log(2)/2].
  // The hex-float values determine the kernel's bitwise results; do not alter.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
  const __m512 vi_max = _mm512_set1_ps(max);

  __m512 vaccs[4];
  vaccs[0] = _mm512_setzero_ps();
  vaccs[1] = _mm512_setzero_ps();
  vaccs[2] = _mm512_setzero_ps();
  vaccs[3] = _mm512_setzero_ps();
  while (batch >= 128 * sizeof(float)) {
    // Main loop: 128 floats (8 vectors of 16) per pass. Vector k is folded
    // into accumulator k % 4, matching the add order of the unrolled form.
    for (size_t k = 0; k < 8; k++) {
      // x := i - max, hence x <= 0 and exp(x) cannot overflow.
      const __m512 vx = _mm512_sub_ps(_mm512_loadu_ps(input + 16 * k), vi_max);
      // n := round(x / log(2)).
      const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
      // t := x - n * log(2), via two-constant Cody-Waite reduction.
      __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
      vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
      // Horner evaluation of the degree-5 polynomial p(t) ~ exp(t).
      __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
      vp = _mm512_fmadd_ps(vp, vt, vc3);
      vp = _mm512_fmadd_ps(vp, vt, vc2);
      vp = _mm512_fmadd_ps(vp, vt, vc1);
      vp = _mm512_fmadd_ps(vp, vt, vc0);
      // f := 2**n * p(t), reconstructed with SCALEF.
      vaccs[k % 4] = _mm512_add_ps(vaccs[k % 4], _mm512_scalef_ps(vp, vn));
    }
    input += 128;
    batch -= 128 * sizeof(float);
  }
  // Collapse the four partial sums pairwise (0+=1, 2+=3, then 0+=2), in the
  // same order as the unrolled form so the floating-point result is identical.
  vaccs[0] = _mm512_add_ps(vaccs[0], vaccs[1]);
  vaccs[2] = _mm512_add_ps(vaccs[2], vaccs[3]);
  __m512 vacc = _mm512_add_ps(vaccs[0], vaccs[2]);
  while (batch >= 16 * sizeof(float)) {
    // Tail loop: one full vector of 16 floats at a time.
    const __m512 vx = _mm512_sub_ps(_mm512_loadu_ps(input), vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    vacc = _mm512_add_ps(vacc, _mm512_scalef_ps(vp, vn));
    input += 16;
    batch -= 16 * sizeof(float);
  }
  if (batch != 0) {
    // Remainder of 1-15 floats: a lane mask keeps out-of-range lanes from
    // being loaded or accumulated.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    const __m512 vx = _mm512_sub_ps(_mm512_maskz_loadu_ps(vmask, input), vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, _mm512_scalef_ps(vp, vn));
  }
  // Horizontal reduction of the 16 lanes to the scalar result.
  *sum = _mm512_reduce_add_ps(vacc);
}
| 9,671 | 39.468619 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-x128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddexpminusmax.h>
// Computes sum = Σ exp(input[i] - max) over batch/sizeof(float) elements
// using AVX512F, processing 128 floats per main-loop pass into a single
// accumulator.
void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x128(
    size_t batch,
    const float* input,
    float* sum,
    float max)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // log2(e), -log(2) split into hi/lo parts for Cody-Waite reduction, and
  // degree-5 minimax polynomial coefficients for exp(t) on [-log(2)/2, log(2)/2].
  // The hex-float values determine the kernel's bitwise results; do not alter.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
  const __m512 vi_max = _mm512_set1_ps(max);

  __m512 vacc = _mm512_setzero_ps();
  while (batch >= 128 * sizeof(float)) {
    // Main loop: 128 floats (8 vectors of 16) per pass, accumulated in
    // ascending vector order to match the unrolled form exactly.
    for (size_t k = 0; k < 8; k++) {
      // x := i - max, hence x <= 0 and exp(x) cannot overflow.
      const __m512 vx = _mm512_sub_ps(_mm512_loadu_ps(input + 16 * k), vi_max);
      // n := round(x / log(2)).
      const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
      // t := x - n * log(2), via two-constant Cody-Waite reduction.
      __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
      vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
      // Horner evaluation of the degree-5 polynomial p(t) ~ exp(t).
      __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
      vp = _mm512_fmadd_ps(vp, vt, vc3);
      vp = _mm512_fmadd_ps(vp, vt, vc2);
      vp = _mm512_fmadd_ps(vp, vt, vc1);
      vp = _mm512_fmadd_ps(vp, vt, vc0);
      // f := 2**n * p(t), reconstructed with SCALEF.
      vacc = _mm512_add_ps(vacc, _mm512_scalef_ps(vp, vn));
    }
    input += 128;
    batch -= 128 * sizeof(float);
  }
  while (batch >= 16 * sizeof(float)) {
    // Tail loop: one full vector of 16 floats at a time.
    const __m512 vx = _mm512_sub_ps(_mm512_loadu_ps(input), vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    vacc = _mm512_add_ps(vacc, _mm512_scalef_ps(vp, vn));
    input += 16;
    batch -= 16 * sizeof(float);
  }
  if (batch != 0) {
    // Remainder of 1-15 floats: a lane mask keeps out-of-range lanes from
    // being loaded or accumulated.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    const __m512 vx = _mm512_sub_ps(_mm512_maskz_loadu_ps(vmask, input), vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, _mm512_scalef_ps(vp, vn));
  }
  // Horizontal reduction of the 16 lanes to the scalar result.
  *sum = _mm512_reduce_add_ps(vacc);
}
| 9,397 | 39.508621 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-x144-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddexpminusmax.h>
// Computes *sum = Σ exp(input[i] - max) over `batch` bytes of float input using AVX-512F.
// Main loop processes 144 floats (9x16) per iteration and rotates partial sums across
// 3 independent accumulators to shorten the floating-point add dependency chain.
// exp() is evaluated as 2**n * p(t), where n = round(x * log2(e)) and p is a degree-5
// polynomial in the reduced argument t; _mm512_scalef_ps applies the 2**n scaling.
void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x144_acc3(
    size_t batch,        // number of input BYTES; nonzero multiple of sizeof(float)
    const float* input,  // input vector
    float* sum,          // output: scalar sum of exponentials
    float max)           // maximum input value, subtracted for numerical stability
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);
  // log2(e): converts the natural-exponent argument into a base-2 exponent.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  // -ln(2) split into high/low parts for Cody-Waite range reduction.
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  // Degree-5 polynomial coefficients approximating exp(t) on [-log(2)/2, log(2)/2].
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
  const __m512 vi_max = _mm512_set1_ps(max);
  // Three independent accumulators, rotated over in the main loop.
  __m512 vacc0 = _mm512_setzero_ps();
  __m512 vacc1 = _mm512_setzero_ps();
  __m512 vacc2 = _mm512_setzero_ps();
  for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
    // Load 144 (9x16) inputs at a time.
    const __m512 vi0 = _mm512_loadu_ps(input);
    const __m512 vi1 = _mm512_loadu_ps(input + 16);
    const __m512 vi2 = _mm512_loadu_ps(input + 32);
    const __m512 vi3 = _mm512_loadu_ps(input + 48);
    const __m512 vi4 = _mm512_loadu_ps(input + 64);
    const __m512 vi5 = _mm512_loadu_ps(input + 80);
    const __m512 vi6 = _mm512_loadu_ps(input + 96);
    const __m512 vi7 = _mm512_loadu_ps(input + 112);
    const __m512 vi8 = _mm512_loadu_ps(input + 128);
    input += 144;
    // Subtract maximum input x := i - i_max.
    const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
    const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
    const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
    const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
    const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
    const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
    const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // via Horner's scheme: p = c0 + t*(c1 + t*(c2 + t*(c3 + t*(c4 + t*c5)))).
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
    const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
    const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
    const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
    const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
    // Accumulate computed exponents, rotating across the 3 accumulators.
    vacc0 = _mm512_add_ps(vacc0, vf0);
    vacc1 = _mm512_add_ps(vacc1, vf1);
    vacc2 = _mm512_add_ps(vacc2, vf2);
    vacc0 = _mm512_add_ps(vacc0, vf3);
    vacc1 = _mm512_add_ps(vacc1, vf4);
    vacc2 = _mm512_add_ps(vacc2, vf5);
    vacc0 = _mm512_add_ps(vacc0, vf6);
    vacc1 = _mm512_add_ps(vacc1, vf7);
    vacc2 = _mm512_add_ps(vacc2, vf8);
  }
  // Add up all accumulators to vacc0
  vacc0 = _mm512_add_ps(vacc0, vacc1);
  vacc0 = _mm512_add_ps(vacc0, vacc2);
  __m512 vacc = vacc0;
  // Tail loop: same computation on one 16-float vector at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vi = _mm512_loadu_ps(input);
    input += 16;
    // Subtract maximum input x := i - i_max.
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    // Accumulate computed exponents.
    vacc = _mm512_add_ps(vacc, vf);
  }
  if (batch != 0) {
    // Final partial vector: build a mask with the low `batch` bits set so only
    // the remaining (1..15) elements are loaded and accumulated.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    // Load up to 15 inputs at a time; masked-off lanes read as zero.
    const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
    // Subtract maximum input x := i - i_max.
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    // Accumulate computed exponents only in the masked lanes.
    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
  }
  // Horizontal reduction of the 16 lanes into the scalar result.
  *sum = _mm512_reduce_add_ps(vacc);
}
| 10,192 | 39.935743 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-x144.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddexpminusmax.h>
// Computes *sum = Σ exp(input[i] - max) over `batch` bytes of float input using AVX-512F.
// Main loop processes 144 floats (9x16) per iteration with a single vector accumulator.
// exp() is evaluated as 2**n * p(t), where n = round(x * log2(e)) and p is a degree-5
// polynomial in the reduced argument t; _mm512_scalef_ps applies the 2**n scaling.
void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x144(
    size_t batch,        // number of input BYTES; nonzero multiple of sizeof(float)
    const float* input,  // input vector
    float* sum,          // output: scalar sum of exponentials
    float max)           // maximum input value, subtracted for numerical stability
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);
  // log2(e): converts the natural-exponent argument into a base-2 exponent.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  // -ln(2) split into high/low parts for Cody-Waite range reduction.
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  // Degree-5 polynomial coefficients approximating exp(t) on [-log(2)/2, log(2)/2].
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
  const __m512 vi_max = _mm512_set1_ps(max);
  __m512 vacc0 = _mm512_setzero_ps();
  for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
    // Load 144 (9x16) inputs at a time.
    const __m512 vi0 = _mm512_loadu_ps(input);
    const __m512 vi1 = _mm512_loadu_ps(input + 16);
    const __m512 vi2 = _mm512_loadu_ps(input + 32);
    const __m512 vi3 = _mm512_loadu_ps(input + 48);
    const __m512 vi4 = _mm512_loadu_ps(input + 64);
    const __m512 vi5 = _mm512_loadu_ps(input + 80);
    const __m512 vi6 = _mm512_loadu_ps(input + 96);
    const __m512 vi7 = _mm512_loadu_ps(input + 112);
    const __m512 vi8 = _mm512_loadu_ps(input + 128);
    input += 144;
    // Subtract maximum input x := i - i_max.
    const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
    const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
    const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
    const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
    const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
    const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
    const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // via Horner's scheme: p = c0 + t*(c1 + t*(c2 + t*(c3 + t*(c4 + t*c5)))).
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
    const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
    const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
    const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
    const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
    // Accumulate computed exponents into the single accumulator.
    vacc0 = _mm512_add_ps(vacc0, vf0);
    vacc0 = _mm512_add_ps(vacc0, vf1);
    vacc0 = _mm512_add_ps(vacc0, vf2);
    vacc0 = _mm512_add_ps(vacc0, vf3);
    vacc0 = _mm512_add_ps(vacc0, vf4);
    vacc0 = _mm512_add_ps(vacc0, vf5);
    vacc0 = _mm512_add_ps(vacc0, vf6);
    vacc0 = _mm512_add_ps(vacc0, vf7);
    vacc0 = _mm512_add_ps(vacc0, vf8);
  }
  __m512 vacc = vacc0;
  // Tail loop: same computation on one 16-float vector at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vi = _mm512_loadu_ps(input);
    input += 16;
    // Subtract maximum input x := i - i_max.
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    // Accumulate computed exponents.
    vacc = _mm512_add_ps(vacc, vf);
  }
  if (batch != 0) {
    // Final partial vector: build a mask with the low `batch` bits set so only
    // the remaining (1..15) elements are loaded and accumulated.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    // Load up to 15 inputs at a time; masked-off lanes read as zero.
    const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
    // Subtract maximum input x := i - i_max.
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    // Accumulate computed exponents only in the masked lanes.
    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
  }
  // Horizontal reduction of the 16 lanes into the scalar result.
  *sum = _mm512_reduce_add_ps(vacc);
}
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-x160-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddexpminusmax.h>
// Computes *sum = Σ exp(input[i] - max) over `batch` bytes of float input using AVX-512F.
// Main loop processes 160 floats (10x16) per iteration and alternates partial sums
// between 2 independent accumulators to shorten the floating-point add dependency chain.
// exp() is evaluated as 2**n * p(t), where n = round(x * log2(e)) and p is a degree-5
// polynomial in the reduced argument t; _mm512_scalef_ps applies the 2**n scaling.
void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x160_acc2(
    size_t batch,        // number of input BYTES; nonzero multiple of sizeof(float)
    const float* input,  // input vector
    float* sum,          // output: scalar sum of exponentials
    float max)           // maximum input value, subtracted for numerical stability
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);
  // log2(e): converts the natural-exponent argument into a base-2 exponent.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  // -ln(2) split into high/low parts for Cody-Waite range reduction.
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  // Degree-5 polynomial coefficients approximating exp(t) on [-log(2)/2, log(2)/2].
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
  const __m512 vi_max = _mm512_set1_ps(max);
  // Two independent accumulators, alternated over in the main loop.
  __m512 vacc0 = _mm512_setzero_ps();
  __m512 vacc1 = _mm512_setzero_ps();
  for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
    // Load 160 (10x16) inputs at a time.
    const __m512 vi0 = _mm512_loadu_ps(input);
    const __m512 vi1 = _mm512_loadu_ps(input + 16);
    const __m512 vi2 = _mm512_loadu_ps(input + 32);
    const __m512 vi3 = _mm512_loadu_ps(input + 48);
    const __m512 vi4 = _mm512_loadu_ps(input + 64);
    const __m512 vi5 = _mm512_loadu_ps(input + 80);
    const __m512 vi6 = _mm512_loadu_ps(input + 96);
    const __m512 vi7 = _mm512_loadu_ps(input + 112);
    const __m512 vi8 = _mm512_loadu_ps(input + 128);
    const __m512 vi9 = _mm512_loadu_ps(input + 144);
    input += 160;
    // Subtract maximum input x := i - i_max.
    const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
    const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
    const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
    const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
    const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
    const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
    const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
    const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // via Horner's scheme: p = c0 + t*(c1 + t*(c2 + t*(c3 + t*(c4 + t*c5)))).
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    __m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
    const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
    const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
    const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
    const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
    const __m512 vf9 = _mm512_scalef_ps(vp9, vn9);
    // Accumulate computed exponents, alternating between the 2 accumulators.
    vacc0 = _mm512_add_ps(vacc0, vf0);
    vacc1 = _mm512_add_ps(vacc1, vf1);
    vacc0 = _mm512_add_ps(vacc0, vf2);
    vacc1 = _mm512_add_ps(vacc1, vf3);
    vacc0 = _mm512_add_ps(vacc0, vf4);
    vacc1 = _mm512_add_ps(vacc1, vf5);
    vacc0 = _mm512_add_ps(vacc0, vf6);
    vacc1 = _mm512_add_ps(vacc1, vf7);
    vacc0 = _mm512_add_ps(vacc0, vf8);
    vacc1 = _mm512_add_ps(vacc1, vf9);
  }
  // Add up all accumulators to vacc0
  vacc0 = _mm512_add_ps(vacc0, vacc1);
  __m512 vacc = vacc0;
  // Tail loop: same computation on one 16-float vector at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vi = _mm512_loadu_ps(input);
    input += 16;
    // Subtract maximum input x := i - i_max.
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    // Accumulate computed exponents.
    vacc = _mm512_add_ps(vacc, vf);
  }
  if (batch != 0) {
    // Final partial vector: build a mask with the low `batch` bits set so only
    // the remaining (1..15) elements are loaded and accumulated.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    // Load up to 15 inputs at a time; masked-off lanes read as zero.
    const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
    // Subtract maximum input x := i - i_max.
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    // Accumulate computed exponents only in the masked lanes.
    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
  }
  // Horizontal reduction of the 16 lanes into the scalar result.
  *sum = _mm512_reduce_add_ps(vacc);
}
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-x160-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddexpminusmax.h>
void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x160_acc5(
size_t batch,
const float* input,
float* sum,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vi_max = _mm512_set1_ps(max);
__m512 vacc0 = _mm512_setzero_ps();
__m512 vacc1 = _mm512_setzero_ps();
__m512 vacc2 = _mm512_setzero_ps();
__m512 vacc3 = _mm512_setzero_ps();
__m512 vacc4 = _mm512_setzero_ps();
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
// Load 160 (10x16) inputs at a time.
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
const __m512 vi5 = _mm512_loadu_ps(input + 80);
const __m512 vi6 = _mm512_loadu_ps(input + 96);
const __m512 vi7 = _mm512_loadu_ps(input + 112);
const __m512 vi8 = _mm512_loadu_ps(input + 128);
const __m512 vi9 = _mm512_loadu_ps(input + 144);
input += 160;
// Subtract maximum input x := i - i_max.
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
// Compute reduced argument batch := round(x / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
__m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
const __m512 vf9 = _mm512_scalef_ps(vp9, vn9);
// Accumulate computed exponents.
vacc0 = _mm512_add_ps(vacc0, vf0);
vacc1 = _mm512_add_ps(vacc1, vf1);
vacc2 = _mm512_add_ps(vacc2, vf2);
vacc3 = _mm512_add_ps(vacc3, vf3);
vacc4 = _mm512_add_ps(vacc4, vf4);
vacc0 = _mm512_add_ps(vacc0, vf5);
vacc1 = _mm512_add_ps(vacc1, vf6);
vacc2 = _mm512_add_ps(vacc2, vf7);
vacc3 = _mm512_add_ps(vacc3, vf8);
vacc4 = _mm512_add_ps(vacc4, vf9);
}
// Add up all accumulators to vacc0
vacc0 = _mm512_add_ps(vacc0, vacc1);
vacc2 = _mm512_add_ps(vacc2, vacc3);
vacc0 = _mm512_add_ps(vacc0, vacc2);
vacc0 = _mm512_add_ps(vacc0, vacc4);
__m512 vacc = vacc0;
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
const __m512 vf = _mm512_scalef_ps(vp, vn);
// Accumulate computed exponents.
vacc = _mm512_add_ps(vacc, vf);
}
if (batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
const __m512 vf = _mm512_scalef_ps(vp, vn);
// Accumulate computed exponents.
vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
}
*sum = _mm512_reduce_add_ps(vacc);
}
| 10,945 | 40.30566 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-x160.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddexpminusmax.h>
// Computes *sum = Σ exp(input[i] - max) over batch/sizeof(float) elements.
// The main loop processes 160 floats (10 x 16-lane AVX-512 vectors) per
// iteration into a single vector accumulator; leftovers are handled 16 at a
// time, and the final (up to 15) elements via a masked load.
//
//   batch - number of input BYTES; must be non-zero and a multiple of sizeof(float)
//   input - pointer to the input floats
//   sum   - receives the scalar sum of exponentials
//   max   - maximum input value; subtracting it before exp() keeps the
//           arguments non-positive (max-subtraction stabilization)
void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x160(
    size_t batch,
    const float* input,
    float* sum,
    float max)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // log2(e): converts x into a base-2 exponent for exp(x) = 2**(x*log2(e)).
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  // -ln(2) split into high/low parts for Cody-Waite range reduction.
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  // Coefficients of the degree-5 polynomial approximating exp(t)
  // on [-log(2)/2, log(2)/2], evaluated via Horner's scheme below.
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);

  const __m512 vi_max = _mm512_set1_ps(max);

  __m512 vacc0 = _mm512_setzero_ps();
  for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
    // Load 160 (10x16) inputs at a time.
    const __m512 vi0 = _mm512_loadu_ps(input);
    const __m512 vi1 = _mm512_loadu_ps(input + 16);
    const __m512 vi2 = _mm512_loadu_ps(input + 32);
    const __m512 vi3 = _mm512_loadu_ps(input + 48);
    const __m512 vi4 = _mm512_loadu_ps(input + 64);
    const __m512 vi5 = _mm512_loadu_ps(input + 80);
    const __m512 vi6 = _mm512_loadu_ps(input + 96);
    const __m512 vi7 = _mm512_loadu_ps(input + 112);
    const __m512 vi8 = _mm512_loadu_ps(input + 128);
    const __m512 vi9 = _mm512_loadu_ps(input + 144);
    input += 160;

    // Subtract maximum input x := i - i_max.
    const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
    const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
    const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
    const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
    const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
    const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
    const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
    const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);

    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);

    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // via Horner's scheme: p = c0 + t*(c1 + t*(c2 + t*(c3 + t*(c4 + t*c5)))).
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    __m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);

    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    // (scalef scales p by 2**n without needing an explicit exponent rebuild).
    const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
    const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
    const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
    const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
    const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
    const __m512 vf9 = _mm512_scalef_ps(vp9, vn9);

    // Accumulate computed exponents into the single vector accumulator.
    vacc0 = _mm512_add_ps(vacc0, vf0);
    vacc0 = _mm512_add_ps(vacc0, vf1);
    vacc0 = _mm512_add_ps(vacc0, vf2);
    vacc0 = _mm512_add_ps(vacc0, vf3);
    vacc0 = _mm512_add_ps(vacc0, vf4);
    vacc0 = _mm512_add_ps(vacc0, vf5);
    vacc0 = _mm512_add_ps(vacc0, vf6);
    vacc0 = _mm512_add_ps(vacc0, vf7);
    vacc0 = _mm512_add_ps(vacc0, vf8);
    vacc0 = _mm512_add_ps(vacc0, vf9);
  }

  __m512 vacc = vacc0;
  // Same exp(x - max) computation as above, one 16-float vector at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vi = _mm512_loadu_ps(input);
    input += 16;

    // Subtract maximum input x := i - i_max.
    const __m512 vx = _mm512_sub_ps(vi, vi_max);

    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    const __m512 vf = _mm512_scalef_ps(vp, vn);

    // Accumulate computed exponents.
    vacc = _mm512_add_ps(vacc, vf);
  }
  if (batch != 0) {
    // Prepare a lane mask covering the remaining (1..15) valid elements.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    // Load up to 15 inputs at a time; masked-off lanes read as zero.
    const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);

    // Subtract maximum input x := i - i_max.
    const __m512 vx = _mm512_sub_ps(vi, vi_max);

    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    const __m512 vf = _mm512_scalef_ps(vp, vn);

    // Accumulate computed exponents; the mask keeps invalid lanes out of the sum.
    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
  }
  // Horizontal reduction of the 16 lanes to the scalar result.
  *sum = _mm512_reduce_add_ps(vacc);
}
| 10,594 | 40.386719 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-x192-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddexpminusmax.h>
// Computes *sum = Σ exp(input[i] - max) over batch/sizeof(float) elements.
// The main loop processes 192 floats (12 x 16-lane AVX-512 vectors) per
// iteration, interleaving the additions across TWO vector accumulators to
// shorten the add dependency chain; leftovers are handled 16 at a time, and
// the final (up to 15) elements via a masked load.
//
//   batch - number of input BYTES; must be non-zero and a multiple of sizeof(float)
//   input - pointer to the input floats
//   sum   - receives the scalar sum of exponentials
//   max   - maximum input value; subtracting it before exp() keeps the
//           arguments non-positive (max-subtraction stabilization)
void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192_acc2(
    size_t batch,
    const float* input,
    float* sum,
    float max)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // log2(e): converts x into a base-2 exponent for exp(x) = 2**(x*log2(e)).
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  // -ln(2) split into high/low parts for Cody-Waite range reduction.
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  // Coefficients of the degree-5 polynomial approximating exp(t)
  // on [-log(2)/2, log(2)/2], evaluated via Horner's scheme below.
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);

  const __m512 vi_max = _mm512_set1_ps(max);

  __m512 vacc0 = _mm512_setzero_ps();
  __m512 vacc1 = _mm512_setzero_ps();
  for (; batch >= 192 * sizeof(float); batch -= 192 * sizeof(float)) {
    // Load 192 (12x16) inputs at a time.
    const __m512 vi0 = _mm512_loadu_ps(input);
    const __m512 vi1 = _mm512_loadu_ps(input + 16);
    const __m512 vi2 = _mm512_loadu_ps(input + 32);
    const __m512 vi3 = _mm512_loadu_ps(input + 48);
    const __m512 vi4 = _mm512_loadu_ps(input + 64);
    const __m512 vi5 = _mm512_loadu_ps(input + 80);
    const __m512 vi6 = _mm512_loadu_ps(input + 96);
    const __m512 vi7 = _mm512_loadu_ps(input + 112);
    const __m512 vi8 = _mm512_loadu_ps(input + 128);
    const __m512 vi9 = _mm512_loadu_ps(input + 144);
    const __m512 vi10 = _mm512_loadu_ps(input + 160);
    const __m512 vi11 = _mm512_loadu_ps(input + 176);
    input += 192;

    // Subtract maximum input x := i - i_max.
    const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
    const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
    const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
    const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
    const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
    const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
    const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
    const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
    const __m512 vx10 = _mm512_sub_ps(vi10, vi_max);
    const __m512 vx11 = _mm512_sub_ps(vi11, vi_max);

    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
    const __m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
    const __m512 vn11 = _mm512_roundscale_ps(_mm512_mul_ps(vx11, vlog2e), 0);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
    __m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_hi, vx10);
    __m512 vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_hi, vx11);

    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
    vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_lo, vt10);
    vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_lo, vt11);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // via Horner's scheme: p = c0 + t*(c1 + t*(c2 + t*(c3 + t*(c4 + t*c5)))).
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    __m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
    __m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
    __m512 vp11 = _mm512_fmadd_ps(vc5, vt11, vc4);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc3);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc2);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc1);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc0);

    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    // (scalef scales p by 2**n without needing an explicit exponent rebuild).
    const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
    const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
    const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
    const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
    const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
    const __m512 vf9 = _mm512_scalef_ps(vp9, vn9);
    const __m512 vf10 = _mm512_scalef_ps(vp10, vn10);
    const __m512 vf11 = _mm512_scalef_ps(vp11, vn11);

    // Accumulate computed exponents, alternating between the two
    // accumulators so consecutive adds are independent.
    vacc0 = _mm512_add_ps(vacc0, vf0);
    vacc1 = _mm512_add_ps(vacc1, vf1);
    vacc0 = _mm512_add_ps(vacc0, vf2);
    vacc1 = _mm512_add_ps(vacc1, vf3);
    vacc0 = _mm512_add_ps(vacc0, vf4);
    vacc1 = _mm512_add_ps(vacc1, vf5);
    vacc0 = _mm512_add_ps(vacc0, vf6);
    vacc1 = _mm512_add_ps(vacc1, vf7);
    vacc0 = _mm512_add_ps(vacc0, vf8);
    vacc1 = _mm512_add_ps(vacc1, vf9);
    vacc0 = _mm512_add_ps(vacc0, vf10);
    vacc1 = _mm512_add_ps(vacc1, vf11);
  }
  // Add up all accumulators to vacc0
  vacc0 = _mm512_add_ps(vacc0, vacc1);

  __m512 vacc = vacc0;
  // Same exp(x - max) computation as above, one 16-float vector at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vi = _mm512_loadu_ps(input);
    input += 16;

    // Subtract maximum input x := i - i_max.
    const __m512 vx = _mm512_sub_ps(vi, vi_max);

    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    const __m512 vf = _mm512_scalef_ps(vp, vn);

    // Accumulate computed exponents.
    vacc = _mm512_add_ps(vacc, vf);
  }
  if (batch != 0) {
    // Prepare a lane mask covering the remaining (1..15) valid elements.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    // Load up to 15 inputs at a time; masked-off lanes read as zero.
    const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);

    // Subtract maximum input x := i - i_max.
    const __m512 vx = _mm512_sub_ps(vi, vi_max);

    // Compute reduced argument n := round(x / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
    const __m512 vf = _mm512_scalef_ps(vp, vn);

    // Accumulate computed exponents; the mask keeps invalid lanes out of the sum.
    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
  }
  // Horizontal reduction of the 16 lanes to the scalar result.
  *sum = _mm512_reduce_add_ps(vacc);
}
| 11,968 | 41.293286 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-x192-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddexpminusmax.h>
void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192_acc3(
size_t batch,
const float* input,
float* sum,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vi_max = _mm512_set1_ps(max);
__m512 vacc0 = _mm512_setzero_ps();
__m512 vacc1 = _mm512_setzero_ps();
__m512 vacc2 = _mm512_setzero_ps();
for (; batch >= 192 * sizeof(float); batch -= 192 * sizeof(float)) {
// Load 192 (12x16) inputs at a time.
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
const __m512 vi5 = _mm512_loadu_ps(input + 80);
const __m512 vi6 = _mm512_loadu_ps(input + 96);
const __m512 vi7 = _mm512_loadu_ps(input + 112);
const __m512 vi8 = _mm512_loadu_ps(input + 128);
const __m512 vi9 = _mm512_loadu_ps(input + 144);
const __m512 vi10 = _mm512_loadu_ps(input + 160);
const __m512 vi11 = _mm512_loadu_ps(input + 176);
input += 192;
// Subtract maximum input x := i - i_max.
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
const __m512 vx10 = _mm512_sub_ps(vi10, vi_max);
const __m512 vx11 = _mm512_sub_ps(vi11, vi_max);
// Compute reduced argument batch := round(x / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
const __m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
const __m512 vn11 = _mm512_roundscale_ps(_mm512_mul_ps(vx11, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_hi, vx10);
__m512 vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_hi, vx11);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_lo, vt10);
vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_lo, vt11);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
__m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
__m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
__m512 vp11 = _mm512_fmadd_ps(vc5, vt11, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
const __m512 vf9 = _mm512_scalef_ps(vp9, vn9);
const __m512 vf10 = _mm512_scalef_ps(vp10, vn10);
const __m512 vf11 = _mm512_scalef_ps(vp11, vn11);
// Accumulate computed exponents.
vacc0 = _mm512_add_ps(vacc0, vf0);
vacc1 = _mm512_add_ps(vacc1, vf1);
vacc2 = _mm512_add_ps(vacc2, vf2);
vacc0 = _mm512_add_ps(vacc0, vf3);
vacc1 = _mm512_add_ps(vacc1, vf4);
vacc2 = _mm512_add_ps(vacc2, vf5);
vacc0 = _mm512_add_ps(vacc0, vf6);
vacc1 = _mm512_add_ps(vacc1, vf7);
vacc2 = _mm512_add_ps(vacc2, vf8);
vacc0 = _mm512_add_ps(vacc0, vf9);
vacc1 = _mm512_add_ps(vacc1, vf10);
vacc2 = _mm512_add_ps(vacc2, vf11);
}
// Add up all accumulators to vacc0
vacc0 = _mm512_add_ps(vacc0, vacc1);
vacc0 = _mm512_add_ps(vacc0, vacc2);
__m512 vacc = vacc0;
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
const __m512 vf = _mm512_scalef_ps(vp, vn);
// Accumulate computed exponents.
vacc = _mm512_add_ps(vacc, vf);
}
if (batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
const __m512 vf = _mm512_scalef_ps(vp, vn);
// Accumulate computed exponents.
vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
}
*sum = _mm512_reduce_add_ps(vacc);
}
| 12,045 | 41.266667 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-x192-acc6.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddexpminusmax.h>
// Computes sum += exp(input[i] - max) over the whole batch with AVX-512F.
// The main loop processes 192 floats (12 ZMM registers) per iteration and
// rotates the partial sums through 6 independent accumulators to shorten the
// floating-point add dependency chain.
//
// batch - size of the input in BYTES; must be non-zero and a multiple of
//         sizeof(float).
// input - pointer to the input floats.
// sum   - receives the final scalar reduction.
// max   - maximum input value; subtracting it keeps every exp() argument
//         <= 0, so the exponentials cannot overflow.
void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192_acc6(
    size_t batch,
    const float* input,
    float* sum,
    float max)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);
  // Constants for exp(x): log2(e) for range reduction, a two-piece
  // (Cody-Waite) representation of -ln(2), and degree-5 polynomial
  // coefficients approximating exp(t) on [-ln(2)/2, ln(2)/2].
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
  const __m512 vi_max = _mm512_set1_ps(max);
  // Six independent accumulators; summed into one after the main loop.
  __m512 vacc0 = _mm512_setzero_ps();
  __m512 vacc1 = _mm512_setzero_ps();
  __m512 vacc2 = _mm512_setzero_ps();
  __m512 vacc3 = _mm512_setzero_ps();
  __m512 vacc4 = _mm512_setzero_ps();
  __m512 vacc5 = _mm512_setzero_ps();
  for (; batch >= 192 * sizeof(float); batch -= 192 * sizeof(float)) {
    // Load 192 (12x16) inputs at a time.
    const __m512 vi0 = _mm512_loadu_ps(input);
    const __m512 vi1 = _mm512_loadu_ps(input + 16);
    const __m512 vi2 = _mm512_loadu_ps(input + 32);
    const __m512 vi3 = _mm512_loadu_ps(input + 48);
    const __m512 vi4 = _mm512_loadu_ps(input + 64);
    const __m512 vi5 = _mm512_loadu_ps(input + 80);
    const __m512 vi6 = _mm512_loadu_ps(input + 96);
    const __m512 vi7 = _mm512_loadu_ps(input + 112);
    const __m512 vi8 = _mm512_loadu_ps(input + 128);
    const __m512 vi9 = _mm512_loadu_ps(input + 144);
    const __m512 vi10 = _mm512_loadu_ps(input + 160);
    const __m512 vi11 = _mm512_loadu_ps(input + 176);
    input += 192;
    // Subtract maximum input x := i - i_max.
    const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
    const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
    const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
    const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
    const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
    const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
    const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
    const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
    const __m512 vx10 = _mm512_sub_ps(vi10, vi_max);
    const __m512 vx11 = _mm512_sub_ps(vi11, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
    const __m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
    const __m512 vn11 = _mm512_roundscale_ps(_mm512_mul_ps(vx11, vlog2e), 0);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
    __m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_hi, vx10);
    __m512 vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_hi, vx11);
    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
    vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_lo, vt10);
    vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_lo, vt11);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    __m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
    __m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
    __m512 vp11 = _mm512_fmadd_ps(vc5, vt11, vc4);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc3);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc2);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc1);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc0);
    // Reconstruct the final f value:
    //   f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**batch * p
    // _mm512_scalef_ps multiplies p by 2**n without an explicit exponent-bit
    // manipulation, so no overflow-prone integer reconstruction is needed.
    const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
    const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
    const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
    const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
    const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
    const __m512 vf9 = _mm512_scalef_ps(vp9, vn9);
    const __m512 vf10 = _mm512_scalef_ps(vp10, vn10);
    const __m512 vf11 = _mm512_scalef_ps(vp11, vn11);
    // Accumulate computed exponents, rotating through the 6 accumulators
    // (each accumulator receives 2 of the 12 vectors per iteration).
    vacc0 = _mm512_add_ps(vacc0, vf0);
    vacc1 = _mm512_add_ps(vacc1, vf1);
    vacc2 = _mm512_add_ps(vacc2, vf2);
    vacc3 = _mm512_add_ps(vacc3, vf3);
    vacc4 = _mm512_add_ps(vacc4, vf4);
    vacc5 = _mm512_add_ps(vacc5, vf5);
    vacc0 = _mm512_add_ps(vacc0, vf6);
    vacc1 = _mm512_add_ps(vacc1, vf7);
    vacc2 = _mm512_add_ps(vacc2, vf8);
    vacc3 = _mm512_add_ps(vacc3, vf9);
    vacc4 = _mm512_add_ps(vacc4, vf10);
    vacc5 = _mm512_add_ps(vacc5, vf11);
  }
  // Add up all accumulators to vacc0 (pairwise tree reduction).
  vacc0 = _mm512_add_ps(vacc0, vacc1);
  vacc2 = _mm512_add_ps(vacc2, vacc3);
  vacc4 = _mm512_add_ps(vacc4, vacc5);
  vacc0 = _mm512_add_ps(vacc0, vacc2);
  vacc0 = _mm512_add_ps(vacc0, vacc4);
  __m512 vacc = vacc0;
  // Remainder loop: one 16-float vector at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vi = _mm512_loadu_ps(input);
    input += 16;
    // Subtract maximum input x := i - i_max.
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    // Reconstruct the final f value:
    //   f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**batch * p
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    // Accumulate computed exponents.
    vacc = _mm512_add_ps(vacc, vf);
  }
  // Final partial vector (1..15 floats): use an AVX-512 lane mask so out-of-
  // range lanes are neither loaded nor accumulated.
  if (batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    // Load up to 15 inputs at a time.
    const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
    // Subtract maximum input x := i - i_max.
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    // Reconstruct the final f value:
    //   f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**batch * p
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    // Accumulate computed exponents (masked lanes keep their old value).
    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
  }
  *sum = _mm512_reduce_add_ps(vacc);
}
| 12,276 | 41.189003 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-x192.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddexpminusmax.h>
// Computes sum += exp(input[i] - max) over the whole batch with AVX-512F.
// The main loop processes 192 floats (12 ZMM registers) per iteration;
// unlike the _acc2/_acc3/_acc6 variants, all partial sums feed a single
// accumulator.
//
// batch - size of the input in BYTES; must be non-zero and a multiple of
//         sizeof(float).
// input - pointer to the input floats.
// sum   - receives the final scalar reduction.
// max   - maximum input value; subtracting it keeps every exp() argument
//         <= 0, so the exponentials cannot overflow.
void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x192(
    size_t batch,
    const float* input,
    float* sum,
    float max)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);
  // Constants for exp(x): log2(e) for range reduction, a two-piece
  // (Cody-Waite) representation of -ln(2), and degree-5 polynomial
  // coefficients approximating exp(t) on [-ln(2)/2, ln(2)/2].
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
  const __m512 vi_max = _mm512_set1_ps(max);
  __m512 vacc0 = _mm512_setzero_ps();
  for (; batch >= 192 * sizeof(float); batch -= 192 * sizeof(float)) {
    // Load 192 (12x16) inputs at a time.
    const __m512 vi0 = _mm512_loadu_ps(input);
    const __m512 vi1 = _mm512_loadu_ps(input + 16);
    const __m512 vi2 = _mm512_loadu_ps(input + 32);
    const __m512 vi3 = _mm512_loadu_ps(input + 48);
    const __m512 vi4 = _mm512_loadu_ps(input + 64);
    const __m512 vi5 = _mm512_loadu_ps(input + 80);
    const __m512 vi6 = _mm512_loadu_ps(input + 96);
    const __m512 vi7 = _mm512_loadu_ps(input + 112);
    const __m512 vi8 = _mm512_loadu_ps(input + 128);
    const __m512 vi9 = _mm512_loadu_ps(input + 144);
    const __m512 vi10 = _mm512_loadu_ps(input + 160);
    const __m512 vi11 = _mm512_loadu_ps(input + 176);
    input += 192;
    // Subtract maximum input x := i - i_max.
    const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
    const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
    const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
    const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
    const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
    const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
    const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
    const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
    const __m512 vx10 = _mm512_sub_ps(vi10, vi_max);
    const __m512 vx11 = _mm512_sub_ps(vi11, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
    const __m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
    const __m512 vn11 = _mm512_roundscale_ps(_mm512_mul_ps(vx11, vlog2e), 0);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
    __m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_hi, vx10);
    __m512 vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_hi, vx11);
    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
    vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_lo, vt10);
    vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_lo, vt11);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    __m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
    __m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
    __m512 vp11 = _mm512_fmadd_ps(vc5, vt11, vc4);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc3);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc2);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc1);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc0);
    // Reconstruct the final f value:
    //   f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**batch * p
    // _mm512_scalef_ps multiplies p by 2**n without an explicit exponent-bit
    // manipulation, so no overflow-prone integer reconstruction is needed.
    const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
    const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
    const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
    const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
    const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
    const __m512 vf9 = _mm512_scalef_ps(vp9, vn9);
    const __m512 vf10 = _mm512_scalef_ps(vp10, vn10);
    const __m512 vf11 = _mm512_scalef_ps(vp11, vn11);
    // Accumulate computed exponents (single accumulator; the adds form one
    // serial dependency chain in this variant).
    vacc0 = _mm512_add_ps(vacc0, vf0);
    vacc0 = _mm512_add_ps(vacc0, vf1);
    vacc0 = _mm512_add_ps(vacc0, vf2);
    vacc0 = _mm512_add_ps(vacc0, vf3);
    vacc0 = _mm512_add_ps(vacc0, vf4);
    vacc0 = _mm512_add_ps(vacc0, vf5);
    vacc0 = _mm512_add_ps(vacc0, vf6);
    vacc0 = _mm512_add_ps(vacc0, vf7);
    vacc0 = _mm512_add_ps(vacc0, vf8);
    vacc0 = _mm512_add_ps(vacc0, vf9);
    vacc0 = _mm512_add_ps(vacc0, vf10);
    vacc0 = _mm512_add_ps(vacc0, vf11);
  }
  __m512 vacc = vacc0;
  // Remainder loop: one 16-float vector at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vi = _mm512_loadu_ps(input);
    input += 16;
    // Subtract maximum input x := i - i_max.
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    // Reconstruct the final f value:
    //   f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**batch * p
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    // Accumulate computed exponents.
    vacc = _mm512_add_ps(vacc, vf);
  }
  // Final partial vector (1..15 floats): use an AVX-512 lane mask so out-of-
  // range lanes are neither loaded nor accumulated.
  if (batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    // Load up to 15 inputs at a time.
    const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
    // Subtract maximum input x := i - i_max.
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    // Reconstruct the final f value:
    //   f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**batch * p
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    // Accumulate computed exponents (masked lanes keep their old value).
    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
  }
  *sum = _mm512_reduce_add_ps(vacc);
}
| 11,848 | 41.317857 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-x64-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/raddextexp.h>
// Sliding window of 32-bit lane masks: seven all-ones entries followed by
// seven zeros.  Reading 7 consecutive entries starting at offset
// (7 - remainder) yields a mask whose first `remainder` lanes are -1.
// NOTE(review): not referenced in the visible portion of this kernel —
// presumably consumed by the AVX masked-load remainder path further down;
// confirm against the full file.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_raddextexp_ukernel__avx2_p5_x64_acc2(
size_t batch,
const float* input,
float* sum)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
// The smallest batch such that 2**batch is considered non-negligible.
// For smaller batch, 2**batch is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
__m256 vaccv0 = _mm256_setzero_ps();
__m256 vaccv1 = _mm256_setzero_ps();
__m256 vacce0 = vminus_inf;
__m256 vacce1 = vminus_inf;
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
// Load 64 (8x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
// Compute reduced argument batch := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av + exp2(be) * bv =
// = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
// = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
// = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
//
// For computational efficiency we may add several "extended" floating-point numbers at a time.
__m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
__m256 vmax_e1 = _mm256_max_ps(vacce1, vn1);
vmax_e0 = _mm256_max_ps(vmax_e0, vn2);
vmax_e1 = _mm256_max_ps(vmax_e1, vn3);
vmax_e0 = _mm256_max_ps(vmax_e0, vn4);
vmax_e1 = _mm256_max_ps(vmax_e1, vn5);
vmax_e0 = _mm256_max_ps(vmax_e0, vn6);
vmax_e1 = _mm256_max_ps(vmax_e1, vn7);
// For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum delta_e at -127.0.
// 2. Map delta_e to scale factor 0.0 when delta_e == -127.0
const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_e1), vmin_exponent);
const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e1), vmin_exponent);
const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e0), vmin_exponent);
const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e1), vmin_exponent);
const __m256 vdelta_e4 = _mm256_max_ps(_mm256_sub_ps(vn4, vmax_e0), vmin_exponent);
const __m256 vdelta_e5 = _mm256_max_ps(_mm256_sub_ps(vn5, vmax_e1), vmin_exponent);
const __m256 vdelta_e6 = _mm256_max_ps(_mm256_sub_ps(vn6, vmax_e0), vmin_exponent);
const __m256 vdelta_e7 = _mm256_max_ps(_mm256_sub_ps(vn7, vmax_e1), vmin_exponent);
// Convert delta-exponents into scale factors:
// - s = exp2(delta_e) when delta_e > -127.0
// - s = 0.0 when delta_e <= -127.0
//
// Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e4, vmagic_bias)), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e5, vmagic_bias)), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e6, vmagic_bias)), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e7, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values
vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
vaccv1 = _mm256_mul_ps(vaccv1, vaccs1);
vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
vaccv1 = _mm256_fmadd_ps(vp1, vs1, vaccv1);
vaccv0 = _mm256_fmadd_ps(vp2, vs2, vaccv0);
vaccv1 = _mm256_fmadd_ps(vp3, vs3, vaccv1);
vaccv0 = _mm256_fmadd_ps(vp4, vs4, vaccv0);
vaccv1 = _mm256_fmadd_ps(vp5, vs5, vaccv1);
vaccv0 = _mm256_fmadd_ps(vp6, vs6, vaccv0);
vaccv1 = _mm256_fmadd_ps(vp7, vs7, vaccv1);
vacce0 = vmax_e0;
vacce1 = vmax_e1;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
const __m256 vmax_acce01 = _mm256_max_ps(vacce0, vacce1);
const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_acce01), vmin_exponent);
const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_acce01), vmin_exponent);
const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
__m256 vaccv = _mm256_mul_ps(vaccv0, vaccs0);
vaccv = _mm256_fmadd_ps(vaccv1, vaccs1, vaccv);
__m256 vacce = vmax_acce01;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
// Compute reduced argument batch := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m256 vmax_e = _mm256_max_ps(vacce, vn);
// For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
// Convert exponents into scale factors.
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values.
vaccv = _mm256_mul_ps(vaccv, vaccs);
vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
vacce = vmax_e;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
// Compute reduced argument batch := round(input / log(2)).
__m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Correct reduced argument batch for masked out batch.
vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m256 vmax_e = _mm256_max_ps(vacce, vn);
// For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values.
vaccv = _mm256_mul_ps(vaccv, vaccs);
vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
vacce = vmax_e;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
__m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
vaccv = _mm256_mul_ps(vaccv, vaccs);
__m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));
_mm_store_ss(&sum[0], vaccv_sum);
_mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));
_mm256_zeroupper();
}
| 15,929 | 49.252366 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-x64-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/raddextexp.h>
// Sliding-window mask table for the remainder path: loading 8 consecutive
// int32 entries starting at mask_table[7 - k] (k = number of leftover floats,
// 1..7) yields k all-bits-set (-1) lanes followed by zero lanes, suitable for
// _mm256_maskload_ps / _mm256_blendv_ps.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// Computes the sum of exponentials of the batch in an "extended" floating-point
// representation: on return sum[0] holds the "mantissa" v and sum[1] holds the
// "exponent" e such that sum(exp(input[i])) == v * exp2(e). Keeping the
// exponent separate avoids overflow/underflow of the plain float sum.
//
// AVX2 variant: main loop consumes 64 floats per iteration (8 vectors of 8)
// and spreads the accumulation across 4 independent (mantissa, exponent)
// accumulator pairs to hide FMA latency.
void xnn_f32_raddextexp_ukernel__avx2_p5_x64_acc4(
    size_t batch,
    const float* input,
    float* sum)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // log2(e), and -log(2) split into high/low parts for Cody-Waite reduction.
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);

  // The smallest exponent delta such that 2**delta is considered non-negligible.
  // For smaller deltas, 2**delta is replaced with zero.
  const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
  // Adding this bias places a clamped exponent delta (in [-127, 0]) into the
  // low bits of the float representation; shifting left by 23 then turns it
  // into exp2(delta) as a float (exactly 0.0f when delta == -127).
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);

  // Degree-5 minimax polynomial coefficients for exp(t), t in [-log(2)/2, log(2)/2].
  const __m256 vc0 = _mm256_set1_ps(1.0f);
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);

  // Four (mantissa, exponent) accumulator pairs; empty sum is 0 * exp2(-inf).
  __m256 vaccv0 = _mm256_setzero_ps();
  __m256 vaccv1 = _mm256_setzero_ps();
  __m256 vaccv2 = _mm256_setzero_ps();
  __m256 vaccv3 = _mm256_setzero_ps();
  __m256 vacce0 = vminus_inf;
  __m256 vacce1 = vminus_inf;
  __m256 vacce2 = vminus_inf;
  __m256 vacce3 = vminus_inf;
  for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
    // Load 64 (8x8) inputs at a time.
    const __m256 vx0 = _mm256_loadu_ps(input);
    const __m256 vx1 = _mm256_loadu_ps(input + 8);
    const __m256 vx2 = _mm256_loadu_ps(input + 16);
    const __m256 vx3 = _mm256_loadu_ps(input + 24);
    const __m256 vx4 = _mm256_loadu_ps(input + 32);
    const __m256 vx5 = _mm256_loadu_ps(input + 40);
    const __m256 vx6 = _mm256_loadu_ps(input + 48);
    const __m256 vx7 = _mm256_loadu_ps(input + 56);
    input += 64;

    // Compute reduced argument n := round(input / log(2)).
    const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    // Compute reduced argument t := input - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);

    vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2] (Horner scheme).
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
    //  - vnX is "exponent"
    //  - vpX is "mantissa"
    //
    //  exp2(ae) * av + exp2(be) * bv =
    //    = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
    //    = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
    //    = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
    //
    // For computational efficiency we may add several "extended" floating-point numbers at a time.
    // Accumulator k absorbs vectors k and k+4 this iteration, so each max is over 3 exponents.
    __m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
    __m256 vmax_e1 = _mm256_max_ps(vacce1, vn1);
    __m256 vmax_e2 = _mm256_max_ps(vacce2, vn2);
    __m256 vmax_e3 = _mm256_max_ps(vacce3, vn3);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn4);
    vmax_e1 = _mm256_max_ps(vmax_e1, vn5);
    vmax_e2 = _mm256_max_ps(vmax_e2, vn6);
    vmax_e3 = _mm256_max_ps(vmax_e3, vn7);

    // For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
    // This replacement is done in two steps:
    //  1. Clamp minimum delta_e at -127.0.
    //  2. Map delta_e to scale factor 0.0 when delta_e == -127.0
    const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
    const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_e1), vmin_exponent);
    const __m256 vdelta_acce2 = _mm256_max_ps(_mm256_sub_ps(vacce2, vmax_e2), vmin_exponent);
    const __m256 vdelta_acce3 = _mm256_max_ps(_mm256_sub_ps(vacce3, vmax_e3), vmin_exponent);
    const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
    const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e1), vmin_exponent);
    const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e2), vmin_exponent);
    const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e3), vmin_exponent);
    const __m256 vdelta_e4 = _mm256_max_ps(_mm256_sub_ps(vn4, vmax_e0), vmin_exponent);
    const __m256 vdelta_e5 = _mm256_max_ps(_mm256_sub_ps(vn5, vmax_e1), vmin_exponent);
    const __m256 vdelta_e6 = _mm256_max_ps(_mm256_sub_ps(vn6, vmax_e2), vmin_exponent);
    const __m256 vdelta_e7 = _mm256_max_ps(_mm256_sub_ps(vn7, vmax_e3), vmin_exponent);

    // Convert delta-exponents into scale factors:
    //  - s = exp2(delta_e) when delta_e > -127.0
    //  - s = 0.0 when delta_e <= -127.0
    //
    // Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
    const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
    const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
    const __m256 vaccs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce2, vmagic_bias)), 23));
    const __m256 vaccs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce3, vmagic_bias)), 23));
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e4, vmagic_bias)), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e5, vmagic_bias)), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e6, vmagic_bias)), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e7, vmagic_bias)), 23));

    // Update accumulated "mantissa" and "exponent" values: rescale the old
    // mantissa to the new common exponent first, then add the scaled terms.
    vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
    vaccv1 = _mm256_mul_ps(vaccv1, vaccs1);
    vaccv2 = _mm256_mul_ps(vaccv2, vaccs2);
    vaccv3 = _mm256_mul_ps(vaccv3, vaccs3);
    vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp1, vs1, vaccv1);
    vaccv2 = _mm256_fmadd_ps(vp2, vs2, vaccv2);
    vaccv3 = _mm256_fmadd_ps(vp3, vs3, vaccv3);
    vaccv0 = _mm256_fmadd_ps(vp4, vs4, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp5, vs5, vaccv1);
    vaccv2 = _mm256_fmadd_ps(vp6, vs6, vaccv2);
    vaccv3 = _mm256_fmadd_ps(vp7, vs7, vaccv3);

    vacce0 = vmax_e0;
    vacce1 = vmax_e1;
    vacce2 = vmax_e2;
    vacce3 = vmax_e3;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums,
  // rescaling every accumulator to the common (maximum) exponent.
  const __m256 vmax_acce01 = _mm256_max_ps(vacce0, vacce1);
  const __m256 vmax_acce23 = _mm256_max_ps(vacce2, vacce3);
  const __m256 vmax_acce0123 = _mm256_max_ps(vmax_acce01, vmax_acce23);
  const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_acce0123), vmin_exponent);
  const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_acce0123), vmin_exponent);
  const __m256 vdelta_acce2 = _mm256_max_ps(_mm256_sub_ps(vacce2, vmax_acce0123), vmin_exponent);
  const __m256 vdelta_acce3 = _mm256_max_ps(_mm256_sub_ps(vacce3, vmax_acce0123), vmin_exponent);
  const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
  const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
  const __m256 vaccs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce2, vmagic_bias)), 23));
  const __m256 vaccs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce3, vmagic_bias)), 23));

  __m256 vaccv = _mm256_mul_ps(vaccv0, vaccs0);
  vaccv = _mm256_fmadd_ps(vaccv1, vaccs1, vaccv);
  vaccv = _mm256_fmadd_ps(vaccv2, vaccs2, vaccv);
  vaccv = _mm256_fmadd_ps(vaccv3, vaccs3, vaccv);
  __m256 vacce = vmax_acce0123;

  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 inputs at a time.
    const __m256 vx = _mm256_loadu_ps(input);
    input += 8;

    // Compute reduced argument n := round(input / log(2)).
    const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    // Compute reduced argument t := input - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vp = _mm256_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m256 vmax_e = _mm256_max_ps(vacce, vn);

    // For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
    const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
    const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);

    // Convert exponents into scale factors.
    const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));

    // Update accumulated "mantissa" and "exponent" values.
    vaccv = _mm256_mul_ps(vaccv, vaccs);
    vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));

    // Build a lane mask with (batch / sizeof(float)) active leading lanes.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));

    // Load up to 7 inputs at a time.
    const __m256 vx = _mm256_maskload_ps(input, vmask);

    // Compute reduced argument n := round(input / log(2)).
    __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    // Compute reduced argument t := input - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Correct reduced argument n for masked-out lanes: use the accumulator's
    // own exponent there so those lanes do not perturb the running maximum.
    vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vp = _mm256_fmadd_ps(vp, vt, vc0);
    // Zero the mantissa in masked-out lanes so they add nothing to the sum.
    vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m256 vmax_e = _mm256_max_ps(vacce, vn);

    // For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
    const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
    const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);

    // Convert exponents into scale factors.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
    const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));

    // Update accumulated "mantissa" and "exponent" values.
    vaccv = _mm256_mul_ps(vaccv, vaccs);
    vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
    vacce = vmax_e;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum:
  // first find the maximum exponent across all 8 lanes, rescale the mantissas to it, then horizontally add them.
  __m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
  vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
  vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
  const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
  const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));

  vaccv = _mm256_mul_ps(vaccv, vaccs);
  __m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
  vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
  vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));

  // sum[0] = mantissa, sum[1] = exponent (all lanes of vmax_acce are equal).
  _mm_store_ss(&sum[0], vaccv_sum);
  _mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));

  _mm256_zeroupper();
}
| 17,361 | 50.519288 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/raddextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_raddextexp_ukernel__avx2_p5_x64(
size_t batch,
const float* input,
float* sum)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
// The smallest batch such that 2**batch is considered non-negligible.
// For smaller batch, 2**batch is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
__m256 vaccv0 = _mm256_setzero_ps();
__m256 vacce0 = vminus_inf;
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
// Load 64 (8x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
// Compute reduced argument batch := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av + exp2(be) * bv =
// = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
// = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
// = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
//
// For computational efficiency we may add several "extended" floating-point numbers at a time.
__m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
vmax_e0 = _mm256_max_ps(vmax_e0, vn1);
vmax_e0 = _mm256_max_ps(vmax_e0, vn2);
vmax_e0 = _mm256_max_ps(vmax_e0, vn3);
vmax_e0 = _mm256_max_ps(vmax_e0, vn4);
vmax_e0 = _mm256_max_ps(vmax_e0, vn5);
vmax_e0 = _mm256_max_ps(vmax_e0, vn6);
vmax_e0 = _mm256_max_ps(vmax_e0, vn7);
// For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum delta_e at -127.0.
// 2. Map delta_e to scale factor 0.0 when delta_e == -127.0
const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e0), vmin_exponent);
const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e0), vmin_exponent);
const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e0), vmin_exponent);
const __m256 vdelta_e4 = _mm256_max_ps(_mm256_sub_ps(vn4, vmax_e0), vmin_exponent);
const __m256 vdelta_e5 = _mm256_max_ps(_mm256_sub_ps(vn5, vmax_e0), vmin_exponent);
const __m256 vdelta_e6 = _mm256_max_ps(_mm256_sub_ps(vn6, vmax_e0), vmin_exponent);
const __m256 vdelta_e7 = _mm256_max_ps(_mm256_sub_ps(vn7, vmax_e0), vmin_exponent);
// Convert delta-exponents into scale factors:
// - s = exp2(delta_e) when delta_e > -127.0
// - s = 0.0 when delta_e <= -127.0
//
// Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e4, vmagic_bias)), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e5, vmagic_bias)), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e6, vmagic_bias)), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e7, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values
vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp1, vs1, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp2, vs2, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp3, vs3, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp4, vs4, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp5, vs5, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp6, vs6, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp7, vs7, vaccv0);
vacce0 = vmax_e0;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
__m256 vaccv = vaccv0;
__m256 vacce = vacce0;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
// Compute reduced argument batch := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m256 vmax_e = _mm256_max_ps(vacce, vn);
// For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
// Convert exponents into scale factors.
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values.
vaccv = _mm256_mul_ps(vaccv, vaccs);
vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
vacce = vmax_e;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
// Compute reduced argument batch := round(input / log(2)).
__m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Correct reduced argument batch for masked out batch.
vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m256 vmax_e = _mm256_max_ps(vacce, vn);
// For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values.
vaccv = _mm256_mul_ps(vaccv, vaccs);
vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
vacce = vmax_e;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
__m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
vaccv = _mm256_mul_ps(vaccv, vaccs);
__m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));
_mm_store_ss(&sum[0], vaccv_sum);
_mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));
_mm256_zeroupper();
}
| 14,961 | 48.543046 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-x72-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/raddextexp.h>
// Mask-construction table for the partial-vector tail: an 8-element load that
// ends inside this table yields k lanes of all-ones (keep) followed by 8-k
// lanes of zeroes (discard), for k = 1..7.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// Sums exp(input[i]) over the batch in "extended" floating-point form to avoid
// overflow/underflow of a plain float sum: on return,
//   sum_i exp(input[i]) == sum[0] * exp2(sum[1])
// where sum[0] is the accumulated "mantissa" and sum[1] the "exponent".
//
// AVX2 kernel using a degree-5 polynomial approximation of exp(t) on
// [-log(2)/2, log(2)/2], processing 72 elements (9 vectors of 8 floats) per
// main-loop iteration with 3 independent accumulators.
//
//   batch - size of input in BYTES; must be non-zero and a multiple of
//           sizeof(float)
//   input - input elements
//   sum   - output of 2 floats: {mantissa, exponent}
void xnn_f32_raddextexp_ukernel__avx2_p5_x72_acc3(
    size_t batch,
    const float* input,
    float* sum)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);
  // log2(e) and -log(2) split into high/low parts (Cody-Waite reduction).
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest delta-exponent e such that exp2(e) is considered non-negligible.
  // For smaller delta-exponents, exp2(e) is replaced with zero.
  const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);
  // Coefficients of the degree-5 polynomial approximation of exp(t).
  const __m256 vc0 = _mm256_set1_ps(1.0f);
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
  // Three independent ("mantissa", "exponent") accumulators. Mantissa 0.0 with
  // exponent -inf represents an (extended) zero sum.
  __m256 vaccv0 = _mm256_setzero_ps();
  __m256 vaccv1 = _mm256_setzero_ps();
  __m256 vaccv2 = _mm256_setzero_ps();
  __m256 vacce0 = vminus_inf;
  __m256 vacce1 = vminus_inf;
  __m256 vacce2 = vminus_inf;
  for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
    // Load 72 (9x8) inputs at a time.
    const __m256 vx0 = _mm256_loadu_ps(input);
    const __m256 vx1 = _mm256_loadu_ps(input + 8);
    const __m256 vx2 = _mm256_loadu_ps(input + 16);
    const __m256 vx3 = _mm256_loadu_ps(input + 24);
    const __m256 vx4 = _mm256_loadu_ps(input + 32);
    const __m256 vx5 = _mm256_loadu_ps(input + 40);
    const __m256 vx6 = _mm256_loadu_ps(input + 48);
    const __m256 vx7 = _mm256_loadu_ps(input + 56);
    const __m256 vx8 = _mm256_loadu_ps(input + 64);
    input += 72;
    // Compute reduced argument n := round(x / log(2)).
    const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn8 = _mm256_round_ps(_mm256_mul_ps(vx8, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // via Horner's scheme.
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
    __m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc0);
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
    // - vnX is "exponent"
    // - vpX is "mantissa"
    //
    // exp2(ae) * av + exp2(be) * bv =
    //   = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
    //   = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
    //   = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
    //
    // For computational efficiency we may add several "extended" floating-point numbers at a time.
    // Vectors 0/3/6, 1/4/7, and 2/5/8 go into accumulators 0, 1, and 2, respectively.
    __m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
    __m256 vmax_e1 = _mm256_max_ps(vacce1, vn1);
    __m256 vmax_e2 = _mm256_max_ps(vacce2, vn2);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn3);
    vmax_e1 = _mm256_max_ps(vmax_e1, vn4);
    vmax_e2 = _mm256_max_ps(vmax_e2, vn5);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn6);
    vmax_e1 = _mm256_max_ps(vmax_e1, vn7);
    vmax_e2 = _mm256_max_ps(vmax_e2, vn8);
    // For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
    // This replacement is done in two steps:
    // 1. Clamp minimum delta_e at -127.0.
    // 2. Map delta_e to scale factor 0.0 when delta_e == -127.0
    const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
    const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_e1), vmin_exponent);
    const __m256 vdelta_acce2 = _mm256_max_ps(_mm256_sub_ps(vacce2, vmax_e2), vmin_exponent);
    const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
    const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e1), vmin_exponent);
    const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e2), vmin_exponent);
    const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e0), vmin_exponent);
    const __m256 vdelta_e4 = _mm256_max_ps(_mm256_sub_ps(vn4, vmax_e1), vmin_exponent);
    const __m256 vdelta_e5 = _mm256_max_ps(_mm256_sub_ps(vn5, vmax_e2), vmin_exponent);
    const __m256 vdelta_e6 = _mm256_max_ps(_mm256_sub_ps(vn6, vmax_e0), vmin_exponent);
    const __m256 vdelta_e7 = _mm256_max_ps(_mm256_sub_ps(vn7, vmax_e1), vmin_exponent);
    const __m256 vdelta_e8 = _mm256_max_ps(_mm256_sub_ps(vn8, vmax_e2), vmin_exponent);
    // Convert delta-exponents into scale factors:
    // - s = exp2(delta_e) when delta_e > -127.0
    // - s = 0.0 when delta_e <= -127.0
    // The exp2 is computed by the magic-bias trick: adding the bias places the
    // (clamped) integer delta-exponent in the low mantissa bits, and the
    // 23-bit shift moves it into the IEEE exponent field.
    //
    // Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
    const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
    const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
    const __m256 vaccs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce2, vmagic_bias)), 23));
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e4, vmagic_bias)), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e5, vmagic_bias)), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e6, vmagic_bias)), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e7, vmagic_bias)), 23));
    const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e8, vmagic_bias)), 23));
    // Update accumulated "mantissa" and "exponent" values: rescale the old
    // mantissa to the new common exponent, then add the rescaled new terms.
    vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
    vaccv1 = _mm256_mul_ps(vaccv1, vaccs1);
    vaccv2 = _mm256_mul_ps(vaccv2, vaccs2);
    vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp1, vs1, vaccv1);
    vaccv2 = _mm256_fmadd_ps(vp2, vs2, vaccv2);
    vaccv0 = _mm256_fmadd_ps(vp3, vs3, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp4, vs4, vaccv1);
    vaccv2 = _mm256_fmadd_ps(vp5, vs5, vaccv2);
    vaccv0 = _mm256_fmadd_ps(vp6, vs6, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp7, vs7, vaccv1);
    vaccv2 = _mm256_fmadd_ps(vp8, vs8, vaccv2);
    vacce0 = vmax_e0;
    vacce1 = vmax_e1;
    vacce2 = vmax_e2;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums:
  // rescale all 3 accumulators to their common maximum exponent and add.
  const __m256 vmax_acce01 = _mm256_max_ps(vacce0, vacce1);
  const __m256 vmax_acce2 = vacce2;
  const __m256 vmax_acce012 = _mm256_max_ps(vmax_acce01, vmax_acce2);
  const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_acce012), vmin_exponent);
  const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_acce012), vmin_exponent);
  const __m256 vdelta_acce2 = _mm256_max_ps(_mm256_sub_ps(vacce2, vmax_acce012), vmin_exponent);
  const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
  const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
  const __m256 vaccs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce2, vmagic_bias)), 23));
  __m256 vaccv = _mm256_mul_ps(vaccv0, vaccs0);
  vaccv = _mm256_fmadd_ps(vaccv1, vaccs1, vaccv);
  vaccv = _mm256_fmadd_ps(vaccv2, vaccs2, vaccv);
  __m256 vacce = vmax_acce012;
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 inputs at a time.
    const __m256 vx = _mm256_loadu_ps(input);
    input += 8;
    // Compute reduced argument n := round(x / log(2)).
    const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vp = _mm256_fmadd_ps(vp, vt, vc0);
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m256 vmax_e = _mm256_max_ps(vacce, vn);
    // For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
    const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
    const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
    // Convert exponents into scale factors.
    const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
    // Update accumulated "mantissa" and "exponent" values.
    vaccv = _mm256_mul_ps(vaccv, vaccs);
    vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Build a lane mask with (batch / sizeof(float)) leading all-ones lanes.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
    // Load up to 7 inputs at a time; masked-out lanes read as zero.
    const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument n := round(x / log(2)).
    __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Correct reduced argument n for masked-out lanes: reuse the accumulator
    // exponent there so those lanes contribute with a zeroed mantissa below.
    vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vp = _mm256_fmadd_ps(vp, vt, vc0);
    // Zero the mantissa of masked-out lanes.
    vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m256 vmax_e = _mm256_max_ps(vacce, vn);
    // For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
    const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
    const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
    // Convert exponents into scale factors.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
    const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
    // Update accumulated "mantissa" and "exponent" values.
    vaccv = _mm256_mul_ps(vaccv, vaccs);
    vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
    vacce = vmax_e;
  }
  // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum:
  // first find the maximum exponent across all 8 lanes (cross-128-bit permute,
  // then two in-lane shuffles), then rescale mantissas and horizontally add.
  __m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
  vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
  vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
  const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
  const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
  vaccv = _mm256_mul_ps(vaccv, vaccs);
  __m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
  vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
  vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));
  // Store the result: sum[0] = mantissa, sum[1] = common exponent.
  _mm_store_ss(&sum[0], vaccv_sum);
  _mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));
  _mm256_zeroupper();
}
| 17,485 | 50.278592 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/raddextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_raddextexp_ukernel__avx2_p5_x72(
size_t batch,
const float* input,
float* sum)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
// The smallest batch such that 2**batch is considered non-negligible.
// For smaller batch, 2**batch is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
__m256 vaccv0 = _mm256_setzero_ps();
__m256 vacce0 = vminus_inf;
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
// Load 72 (9x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
// Compute reduced argument batch := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn8 = _mm256_round_ps(_mm256_mul_ps(vx8, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av + exp2(be) * bv =
// = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
// = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
// = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
//
// For computational efficiency we may add several "extended" floating-point numbers at a time.
__m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
vmax_e0 = _mm256_max_ps(vmax_e0, vn1);
vmax_e0 = _mm256_max_ps(vmax_e0, vn2);
vmax_e0 = _mm256_max_ps(vmax_e0, vn3);
vmax_e0 = _mm256_max_ps(vmax_e0, vn4);
vmax_e0 = _mm256_max_ps(vmax_e0, vn5);
vmax_e0 = _mm256_max_ps(vmax_e0, vn6);
vmax_e0 = _mm256_max_ps(vmax_e0, vn7);
vmax_e0 = _mm256_max_ps(vmax_e0, vn8);
// For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum delta_e at -127.0.
// 2. Map delta_e to scale factor 0.0 when delta_e == -127.0
const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e0), vmin_exponent);
const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e0), vmin_exponent);
const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e0), vmin_exponent);
const __m256 vdelta_e4 = _mm256_max_ps(_mm256_sub_ps(vn4, vmax_e0), vmin_exponent);
const __m256 vdelta_e5 = _mm256_max_ps(_mm256_sub_ps(vn5, vmax_e0), vmin_exponent);
const __m256 vdelta_e6 = _mm256_max_ps(_mm256_sub_ps(vn6, vmax_e0), vmin_exponent);
const __m256 vdelta_e7 = _mm256_max_ps(_mm256_sub_ps(vn7, vmax_e0), vmin_exponent);
const __m256 vdelta_e8 = _mm256_max_ps(_mm256_sub_ps(vn8, vmax_e0), vmin_exponent);
// Convert delta-exponents into scale factors:
// - s = exp2(delta_e) when delta_e > -127.0
// - s = 0.0 when delta_e <= -127.0
//
// Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e4, vmagic_bias)), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e5, vmagic_bias)), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e6, vmagic_bias)), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e7, vmagic_bias)), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e8, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values
vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp1, vs1, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp2, vs2, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp3, vs3, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp4, vs4, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp5, vs5, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp6, vs6, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp7, vs7, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp8, vs8, vaccv0);
vacce0 = vmax_e0;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
__m256 vaccv = vaccv0;
__m256 vacce = vacce0;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
// Compute reduced argument batch := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m256 vmax_e = _mm256_max_ps(vacce, vn);
// For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
// Convert exponents into scale factors.
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values.
vaccv = _mm256_mul_ps(vaccv, vaccs);
vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
vacce = vmax_e;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
// Compute reduced argument batch := round(input / log(2)).
__m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Correct reduced argument batch for masked out batch.
vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m256 vmax_e = _mm256_max_ps(vacce, vn);
// For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values.
vaccv = _mm256_mul_ps(vaccv, vaccs);
vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
vacce = vmax_e;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
__m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
vaccv = _mm256_mul_ps(vaccv, vaccs);
__m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));
_mm_store_ss(&sum[0], vaccv_sum);
_mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));
_mm256_zeroupper();
}
| 15,762 | 49.04127 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-x80-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/raddextexp.h>
// Sliding mask table for _mm256_maskload_ps: loading 8 consecutive entries
// starting at ((uintptr_t) &mask_table[7] - batch) yields a vector whose
// first batch/sizeof(float) lanes are -1 (load) and the rest 0 (skip),
// which is how the 1..7-float remainder of the batch is read safely.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// Reduce-add of exponentials ("raddextexp"): accumulates sum over the batch of
// exp(input[i]) in an "extended" floating-point representation. The result is
// written as two floats: sum[0] ("mantissa") and sum[1] (base-2 "exponent"),
// i.e. the mathematical value is sum[0] * 2**sum[1]. Keeping the exponent
// separate lets the sum stay finite even when individual exp(input[i]) values
// would overflow binary32.
//
// x80_acc2 variant: the main loop consumes 80 floats (10 AVX2 vectors) per
// iteration and interleaves updates across 2 independent accumulator pairs to
// shorten dependency chains.
//
// batch - number of bytes of input; must be a non-zero multiple of sizeof(float)
// input - pointer to the input floats
// sum   - pointer to 2 output floats (mantissa, exponent as described above)
void xnn_f32_raddextexp_ukernel__avx2_p5_x80_acc2(
    size_t batch,
    const float* input,
    float* sum)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);
  // exp(x) is evaluated as 2**n * exp(t) with x = n*log(2) + t,
  // t in [-log(2)/2, log(2)/2]; log(2) is split into hi/lo parts for
  // Cody-Waite range reduction.
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest batch such that 2**batch is considered non-negligible.
  // For smaller batch, 2**batch is replaced with zero.
  const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
  // Adding vmagic_bias to a small float leaves its integer value in the low
  // mantissa bits; a subsequent 23-bit left shift then forms 2**value
  // directly in the binary32 exponent field (see scale-factor computation
  // below).
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);
  // Degree-5 minimax polynomial coefficients for exp(t) on [-log(2)/2, log(2)/2].
  const __m256 vc0 = _mm256_set1_ps(1.0f);
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
  // Two independent ("mantissa", "exponent") accumulator pairs.
  // The empty sum is represented as 0.0 * 2**(-inf).
  __m256 vaccv0 = _mm256_setzero_ps();
  __m256 vaccv1 = _mm256_setzero_ps();
  __m256 vacce0 = vminus_inf;
  __m256 vacce1 = vminus_inf;
  for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
    // Load 80 (10x8) inputs at a time.
    const __m256 vx0 = _mm256_loadu_ps(input);
    const __m256 vx1 = _mm256_loadu_ps(input + 8);
    const __m256 vx2 = _mm256_loadu_ps(input + 16);
    const __m256 vx3 = _mm256_loadu_ps(input + 24);
    const __m256 vx4 = _mm256_loadu_ps(input + 32);
    const __m256 vx5 = _mm256_loadu_ps(input + 40);
    const __m256 vx6 = _mm256_loadu_ps(input + 48);
    const __m256 vx7 = _mm256_loadu_ps(input + 56);
    const __m256 vx8 = _mm256_loadu_ps(input + 64);
    const __m256 vx9 = _mm256_loadu_ps(input + 72);
    input += 80;
    // Compute reduced argument batch := round(input / log(2)).
    const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn8 = _mm256_round_ps(_mm256_mul_ps(vx8, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn9 = _mm256_round_ps(_mm256_mul_ps(vx9, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
    vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    // Evaluated with Horner's scheme: p = c0 + t*(c1 + t*(c2 + t*(c3 + t*(c4 + t*c5)))).
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
    __m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
    __m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc0);
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
    // - vnX is "exponent"
    // - vpX is "mantissa"
    //
    // exp2(ae) * av + exp2(be) * bv =
    //   = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
    //   = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
    //   = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
    //
    // For computational efficiency we may add several "extended" floating-point numbers at a time.
    // Even-numbered vectors go to accumulator 0, odd-numbered to accumulator 1.
    __m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
    __m256 vmax_e1 = _mm256_max_ps(vacce1, vn1);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn2);
    vmax_e1 = _mm256_max_ps(vmax_e1, vn3);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn4);
    vmax_e1 = _mm256_max_ps(vmax_e1, vn5);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn6);
    vmax_e1 = _mm256_max_ps(vmax_e1, vn7);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn8);
    vmax_e1 = _mm256_max_ps(vmax_e1, vn9);
    // For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
    // This replacement is done in two steps:
    // 1. Clamp minimum delta_e at -127.0.
    // 2. Map delta_e to scale factor 0.0 when delta_e == -127.0
    const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
    const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_e1), vmin_exponent);
    const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
    const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e1), vmin_exponent);
    const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e0), vmin_exponent);
    const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e1), vmin_exponent);
    const __m256 vdelta_e4 = _mm256_max_ps(_mm256_sub_ps(vn4, vmax_e0), vmin_exponent);
    const __m256 vdelta_e5 = _mm256_max_ps(_mm256_sub_ps(vn5, vmax_e1), vmin_exponent);
    const __m256 vdelta_e6 = _mm256_max_ps(_mm256_sub_ps(vn6, vmax_e0), vmin_exponent);
    const __m256 vdelta_e7 = _mm256_max_ps(_mm256_sub_ps(vn7, vmax_e1), vmin_exponent);
    const __m256 vdelta_e8 = _mm256_max_ps(_mm256_sub_ps(vn8, vmax_e0), vmin_exponent);
    const __m256 vdelta_e9 = _mm256_max_ps(_mm256_sub_ps(vn9, vmax_e1), vmin_exponent);
    // Convert delta-exponents into scale factors:
    // - s = exp2(delta_e) when delta_e > -127.0
    // - s = 0.0 when delta_e <= -127.0
    //
    // Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
    const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
    const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e4, vmagic_bias)), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e5, vmagic_bias)), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e6, vmagic_bias)), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e7, vmagic_bias)), 23));
    const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e8, vmagic_bias)), 23));
    const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e9, vmagic_bias)), 23));
    // Update accumulated "mantissa" and "exponent" values
    vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
    vaccv1 = _mm256_mul_ps(vaccv1, vaccs1);
    vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp1, vs1, vaccv1);
    vaccv0 = _mm256_fmadd_ps(vp2, vs2, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp3, vs3, vaccv1);
    vaccv0 = _mm256_fmadd_ps(vp4, vs4, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp5, vs5, vaccv1);
    vaccv0 = _mm256_fmadd_ps(vp6, vs6, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp7, vs7, vaccv1);
    vaccv0 = _mm256_fmadd_ps(vp8, vs8, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp9, vs9, vaccv1);
    vacce0 = vmax_e0;
    vacce1 = vmax_e1;
  }
  // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
  // Both accumulators are rescaled to the common (elementwise maximum) exponent before adding.
  const __m256 vmax_acce01 = _mm256_max_ps(vacce0, vacce1);
  const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_acce01), vmin_exponent);
  const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_acce01), vmin_exponent);
  const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
  const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
  __m256 vaccv = _mm256_mul_ps(vaccv0, vaccs0);
  vaccv = _mm256_fmadd_ps(vaccv1, vaccs1, vaccv);
  __m256 vacce = vmax_acce01;
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 inputs at a time.
    const __m256 vx = _mm256_loadu_ps(input);
    input += 8;
    // Compute reduced argument batch := round(input / log(2)).
    const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vp = _mm256_fmadd_ps(vp, vt, vc0);
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m256 vmax_e = _mm256_max_ps(vacce, vn);
    // For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
    const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
    const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
    // Convert exponents into scale factors.
    const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
    // Update accumulated "mantissa" and "exponent" values.
    vaccv = _mm256_mul_ps(vaccv, vaccs);
    vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Build a per-lane mask with the first batch/sizeof(float) lanes active
    // (see mask_table definition).
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
    // Load up to 7 inputs at a time.
    const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument batch := round(input / log(2)).
    __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Correct reduced argument batch for masked out batch.
    vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vp = _mm256_fmadd_ps(vp, vt, vc0);
    // Zero the "mantissa" in masked-out lanes so they do not contribute to the sum.
    vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m256 vmax_e = _mm256_max_ps(vacce, vn);
    // For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
    const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
    const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
    // Convert exponents into scale factors.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
    const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
    // Update accumulated "mantissa" and "exponent" values.
    vaccv = _mm256_mul_ps(vaccv, vaccs);
    vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
    vacce = vmax_e;
  }
  // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
  // First broadcast the maximum exponent across all 8 lanes (128-bit swap,
  // then 64-bit and 32-bit shuffles), rescale the mantissas to it, then
  // horizontally add the mantissa lanes.
  __m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
  vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
  vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
  const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
  const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
  vaccv = _mm256_mul_ps(vaccv, vaccs);
  __m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
  vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
  vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));
  // Store the result: sum[0] = "mantissa", sum[1] = base-2 "exponent";
  // the accumulated value is sum[0] * 2**sum[1].
  _mm_store_ss(&sum[0], vaccv_sum);
  _mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));
  _mm256_zeroupper();
}
| 17,532 | 50.116618 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-x80-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/raddextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_raddextexp_ukernel__avx2_p5_x80_acc5(
size_t batch,
const float* input,
float* sum)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
// The smallest batch such that 2**batch is considered non-negligible.
// For smaller batch, 2**batch is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
__m256 vaccv0 = _mm256_setzero_ps();
__m256 vaccv1 = _mm256_setzero_ps();
__m256 vaccv2 = _mm256_setzero_ps();
__m256 vaccv3 = _mm256_setzero_ps();
__m256 vaccv4 = _mm256_setzero_ps();
__m256 vacce0 = vminus_inf;
__m256 vacce1 = vminus_inf;
__m256 vacce2 = vminus_inf;
__m256 vacce3 = vminus_inf;
__m256 vacce4 = vminus_inf;
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
// Load 80 (10x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
// Compute reduced argument batch := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn8 = _mm256_round_ps(_mm256_mul_ps(vx8, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn9 = _mm256_round_ps(_mm256_mul_ps(vx9, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av + exp2(be) * bv =
// = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
// = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
// = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
//
// For computational efficiency we may add several "extended" floating-point numbers at a time.
__m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
__m256 vmax_e1 = _mm256_max_ps(vacce1, vn1);
__m256 vmax_e2 = _mm256_max_ps(vacce2, vn2);
__m256 vmax_e3 = _mm256_max_ps(vacce3, vn3);
__m256 vmax_e4 = _mm256_max_ps(vacce4, vn4);
vmax_e0 = _mm256_max_ps(vmax_e0, vn5);
vmax_e1 = _mm256_max_ps(vmax_e1, vn6);
vmax_e2 = _mm256_max_ps(vmax_e2, vn7);
vmax_e3 = _mm256_max_ps(vmax_e3, vn8);
vmax_e4 = _mm256_max_ps(vmax_e4, vn9);
// For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum delta_e at -127.0.
// 2. Map delta_e to scale factor 0.0 when delta_e == -127.0
const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_e1), vmin_exponent);
const __m256 vdelta_acce2 = _mm256_max_ps(_mm256_sub_ps(vacce2, vmax_e2), vmin_exponent);
const __m256 vdelta_acce3 = _mm256_max_ps(_mm256_sub_ps(vacce3, vmax_e3), vmin_exponent);
const __m256 vdelta_acce4 = _mm256_max_ps(_mm256_sub_ps(vacce4, vmax_e4), vmin_exponent);
const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e1), vmin_exponent);
const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e2), vmin_exponent);
const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e3), vmin_exponent);
const __m256 vdelta_e4 = _mm256_max_ps(_mm256_sub_ps(vn4, vmax_e4), vmin_exponent);
const __m256 vdelta_e5 = _mm256_max_ps(_mm256_sub_ps(vn5, vmax_e0), vmin_exponent);
const __m256 vdelta_e6 = _mm256_max_ps(_mm256_sub_ps(vn6, vmax_e1), vmin_exponent);
const __m256 vdelta_e7 = _mm256_max_ps(_mm256_sub_ps(vn7, vmax_e2), vmin_exponent);
const __m256 vdelta_e8 = _mm256_max_ps(_mm256_sub_ps(vn8, vmax_e3), vmin_exponent);
const __m256 vdelta_e9 = _mm256_max_ps(_mm256_sub_ps(vn9, vmax_e4), vmin_exponent);
// Convert delta-exponents into scale factors:
// - s = exp2(delta_e) when delta_e > -127.0
// - s = 0.0 when delta_e <= -127.0
//
// Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
const __m256 vaccs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce2, vmagic_bias)), 23));
const __m256 vaccs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce3, vmagic_bias)), 23));
const __m256 vaccs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce4, vmagic_bias)), 23));
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e4, vmagic_bias)), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e5, vmagic_bias)), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e6, vmagic_bias)), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e7, vmagic_bias)), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e8, vmagic_bias)), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e9, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values
vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
vaccv1 = _mm256_mul_ps(vaccv1, vaccs1);
vaccv2 = _mm256_mul_ps(vaccv2, vaccs2);
vaccv3 = _mm256_mul_ps(vaccv3, vaccs3);
vaccv4 = _mm256_mul_ps(vaccv4, vaccs4);
vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
vaccv1 = _mm256_fmadd_ps(vp1, vs1, vaccv1);
vaccv2 = _mm256_fmadd_ps(vp2, vs2, vaccv2);
vaccv3 = _mm256_fmadd_ps(vp3, vs3, vaccv3);
vaccv4 = _mm256_fmadd_ps(vp4, vs4, vaccv4);
vaccv0 = _mm256_fmadd_ps(vp5, vs5, vaccv0);
vaccv1 = _mm256_fmadd_ps(vp6, vs6, vaccv1);
vaccv2 = _mm256_fmadd_ps(vp7, vs7, vaccv2);
vaccv3 = _mm256_fmadd_ps(vp8, vs8, vaccv3);
vaccv4 = _mm256_fmadd_ps(vp9, vs9, vaccv4);
vacce0 = vmax_e0;
vacce1 = vmax_e1;
vacce2 = vmax_e2;
vacce3 = vmax_e3;
vacce4 = vmax_e4;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
const __m256 vmax_acce01 = _mm256_max_ps(vacce0, vacce1);
const __m256 vmax_acce23 = _mm256_max_ps(vacce2, vacce3);
const __m256 vmax_acce4 = vacce4;
const __m256 vmax_acce0123 = _mm256_max_ps(vmax_acce01, vmax_acce23);
const __m256 vmax_acce01234 = _mm256_max_ps(vmax_acce0123, vmax_acce4);
const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_acce01234), vmin_exponent);
const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_acce01234), vmin_exponent);
const __m256 vdelta_acce2 = _mm256_max_ps(_mm256_sub_ps(vacce2, vmax_acce01234), vmin_exponent);
const __m256 vdelta_acce3 = _mm256_max_ps(_mm256_sub_ps(vacce3, vmax_acce01234), vmin_exponent);
const __m256 vdelta_acce4 = _mm256_max_ps(_mm256_sub_ps(vacce4, vmax_acce01234), vmin_exponent);
const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
const __m256 vaccs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce2, vmagic_bias)), 23));
const __m256 vaccs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce3, vmagic_bias)), 23));
const __m256 vaccs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce4, vmagic_bias)), 23));
__m256 vaccv = _mm256_mul_ps(vaccv0, vaccs0);
vaccv = _mm256_fmadd_ps(vaccv1, vaccs1, vaccv);
vaccv = _mm256_fmadd_ps(vaccv2, vaccs2, vaccv);
vaccv = _mm256_fmadd_ps(vaccv3, vaccs3, vaccv);
vaccv = _mm256_fmadd_ps(vaccv4, vaccs4, vaccv);
__m256 vacce = vmax_acce01234;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
// Compute reduced argument batch := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m256 vmax_e = _mm256_max_ps(vacce, vn);
// For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
// Convert exponents into scale factors.
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values.
vaccv = _mm256_mul_ps(vaccv, vaccs);
vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
vacce = vmax_e;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
// Compute reduced argument batch := round(input / log(2)).
__m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Correct reduced argument batch for masked out batch.
vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m256 vmax_e = _mm256_max_ps(vacce, vn);
// For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values.
vaccv = _mm256_mul_ps(vaccv, vaccs);
vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
vacce = vmax_e;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
__m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
vaccv = _mm256_mul_ps(vaccv, vaccs);
__m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));
_mm_store_ss(&sum[0], vaccv_sum);
_mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));
_mm256_zeroupper();
}
| 19,727 | 51.748663 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/raddextexp.h>
// Mask table for AVX2 masked loads of the final partial vector (1-7 floats).
// Loading 8 lanes from ((uintptr_t) &mask_table[7] - batch), where batch is the
// remaining byte count (a multiple of sizeof(float)), yields exactly
// batch/sizeof(float) leading all-ones (-1) lanes followed by zero lanes.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// Computes the sum of exp(input[i]) over the batch in an overflow-safe
// "extended" floating-point representation and stores the result as a
// (mantissa, exponent) pair:
//   sum[0] = m, sum[1] = e such that sum_i exp(input[i]) == m * 2**e.
// Keeping the exponent separate lets the reduction handle inputs whose
// exponentials would overflow or underflow regular binary32 floats
// (this kernel feeds numerically-stable softmax implementations).
//
// AVX2+FMA micro-kernel, main loop processes 80 floats (10x8) per iteration
// with a single extended accumulator (vaccv0/vacce0).
//
// @param batch  number of input bytes; must be non-zero and divisible by sizeof(float)
// @param input  pointer to the input floats
// @param sum    pointer to 2 output floats: {mantissa, exponent}
void xnn_f32_raddextexp_ukernel__avx2_p5_x80(
    size_t batch,
    const float* input,
    float* sum)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // Constants for the exp() approximation: exp(x) = 2**n * exp(t) with
  // n = round(x * log2(e)) and t = x - n * ln(2) in [-ln(2)/2, ln(2)/2].
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);

  // The smallest batch such that 2**batch is considered non-negligible.
  // For smaller batch, 2**batch is replaced with zero.
  const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
  // Adding this bias to a delta-exponent in [-127, 0] places (127 + delta_e)
  // in the low bits of the float; shifting that left by 23 builds exp2(delta_e)
  // directly, and delta_e == -127 maps to a zero scale factor.
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  // Initial accumulator exponent: exp2(-inf) == 0, i.e. an empty sum.
  const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);

  // Degree-5 minimax polynomial coefficients for exp(t) on [-ln(2)/2, ln(2)/2].
  const __m256 vc0 = _mm256_set1_ps(1.0f);
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);

  // Extended accumulator: vaccv0 is the "mantissa", vacce0 the "exponent".
  __m256 vaccv0 = _mm256_setzero_ps();
  __m256 vacce0 = vminus_inf;
  for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
    // Load 80 (10x8) inputs at a time.
    const __m256 vx0 = _mm256_loadu_ps(input);
    const __m256 vx1 = _mm256_loadu_ps(input + 8);
    const __m256 vx2 = _mm256_loadu_ps(input + 16);
    const __m256 vx3 = _mm256_loadu_ps(input + 24);
    const __m256 vx4 = _mm256_loadu_ps(input + 32);
    const __m256 vx5 = _mm256_loadu_ps(input + 40);
    const __m256 vx6 = _mm256_loadu_ps(input + 48);
    const __m256 vx7 = _mm256_loadu_ps(input + 56);
    const __m256 vx8 = _mm256_loadu_ps(input + 64);
    const __m256 vx9 = _mm256_loadu_ps(input + 72);
    input += 80;

    // Compute reduced argument batch := round(input / log(2)).
    const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn8 = _mm256_round_ps(_mm256_mul_ps(vx8, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn9 = _mm256_round_ps(_mm256_mul_ps(vx9, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
    vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    // Horner evaluation: p = c0 + t*(c1 + t*(c2 + t*(c3 + t*(c4 + t*c5)))).
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
    __m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
    __m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
    // - vnX is "exponent"
    // - vpX is "mantissa"
    //
    // exp2(ae) * av + exp2(be) * bv =
    //   = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
    //   = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
    //   = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
    //
    // For computational efficiency we may add several "extended" floating-point numbers at a time.
    __m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn1);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn2);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn3);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn4);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn5);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn6);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn7);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn8);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn9);

    // For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
    // This replacement is done in two steps:
    // 1. Clamp minimum delta_e at -127.0.
    // 2. Map delta_e to scale factor 0.0 when delta_e == -127.0
    const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
    const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
    const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e0), vmin_exponent);
    const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e0), vmin_exponent);
    const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e0), vmin_exponent);
    const __m256 vdelta_e4 = _mm256_max_ps(_mm256_sub_ps(vn4, vmax_e0), vmin_exponent);
    const __m256 vdelta_e5 = _mm256_max_ps(_mm256_sub_ps(vn5, vmax_e0), vmin_exponent);
    const __m256 vdelta_e6 = _mm256_max_ps(_mm256_sub_ps(vn6, vmax_e0), vmin_exponent);
    const __m256 vdelta_e7 = _mm256_max_ps(_mm256_sub_ps(vn7, vmax_e0), vmin_exponent);
    const __m256 vdelta_e8 = _mm256_max_ps(_mm256_sub_ps(vn8, vmax_e0), vmin_exponent);
    const __m256 vdelta_e9 = _mm256_max_ps(_mm256_sub_ps(vn9, vmax_e0), vmin_exponent);

    // Convert delta-exponents into scale factors:
    // - s = exp2(delta_e) when delta_e > -127.0
    // - s = 0.0 when delta_e <= -127.0
    //
    // Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
    // (Add magic bias, then shift the biased exponent bits into place — builds exp2(delta_e) without a transcendental call.)
    const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e4, vmagic_bias)), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e5, vmagic_bias)), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e6, vmagic_bias)), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e7, vmagic_bias)), 23));
    const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e8, vmagic_bias)), 23));
    const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e9, vmagic_bias)), 23));

    // Update accumulated "mantissa" and "exponent" values
    vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
    vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
    vaccv0 = _mm256_fmadd_ps(vp1, vs1, vaccv0);
    vaccv0 = _mm256_fmadd_ps(vp2, vs2, vaccv0);
    vaccv0 = _mm256_fmadd_ps(vp3, vs3, vaccv0);
    vaccv0 = _mm256_fmadd_ps(vp4, vs4, vaccv0);
    vaccv0 = _mm256_fmadd_ps(vp5, vs5, vaccv0);
    vaccv0 = _mm256_fmadd_ps(vp6, vs6, vaccv0);
    vaccv0 = _mm256_fmadd_ps(vp7, vs7, vaccv0);
    vaccv0 = _mm256_fmadd_ps(vp8, vs8, vaccv0);
    vaccv0 = _mm256_fmadd_ps(vp9, vs9, vaccv0);
    vacce0 = vmax_e0;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
  // (Single accumulator variant: nothing to merge, just rename.)
  __m256 vaccv = vaccv0;
  __m256 vacce = vacce0;

  // Process remaining full 8-float vectors, one at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 inputs at a time.
    const __m256 vx = _mm256_loadu_ps(input);
    input += 8;

    // Compute reduced argument batch := round(input / log(2)).
    const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vp = _mm256_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m256 vmax_e = _mm256_max_ps(vacce, vn);

    // For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
    const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
    const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);

    // Convert exponents into scale factors.
    const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));

    // Update accumulated "mantissa" and "exponent" values.
    vaccv = _mm256_mul_ps(vaccv, vaccs);
    vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));

    // Build a per-lane mask with (batch / sizeof(float)) leading all-ones lanes.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));

    // Load up to 7 inputs at a time.
    const __m256 vx = _mm256_maskload_ps(input, vmask);

    // Compute reduced argument batch := round(input / log(2)).
    __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Correct reduced argument batch for masked out batch.
    // (Masked-out lanes take the accumulator exponent so they cannot raise vmax_e.)
    vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vp = _mm256_fmadd_ps(vp, vt, vc0);
    // Zero the "mantissa" in masked-out lanes so they add nothing to the sum.
    vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m256 vmax_e = _mm256_max_ps(vacce, vn);

    // For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
    const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
    const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);

    // Convert exponents into scale factors.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
    const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));

    // Update accumulated "mantissa" and "exponent" values.
    vaccv = _mm256_mul_ps(vaccv, vaccs);
    vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
    vacce = vmax_e;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
  // First broadcast the maximum exponent across all 8 lanes (cross-128-bit swap, then 64-bit and 32-bit shuffles)...
  __m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
  vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
  vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
  // ...then rescale every lane's mantissa to that common exponent.
  const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
  const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
  vaccv = _mm256_mul_ps(vaccv, vaccs);

  // Horizontal sum of the 8 rescaled mantissa lanes down to a scalar.
  __m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
  vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
  vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));

  // Emit the (mantissa, exponent) pair: result == sum[0] * 2**sum[1].
  _mm_store_ss(&sum[0], vaccv_sum);
  _mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));

  _mm256_zeroupper();
}
| 16,564 | 49.503049 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-x96-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/raddextexp.h>
// Mask table for AVX2 masked loads of the final partial vector (1-7 floats).
// Loading 8 lanes from ((uintptr_t) &mask_table[7] - batch), where batch is the
// remaining byte count (a multiple of sizeof(float)), yields exactly
// batch/sizeof(float) leading all-ones (-1) lanes followed by zero lanes.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_raddextexp_ukernel__avx2_p5_x96_acc2(
size_t batch,
const float* input,
float* sum)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
// The smallest batch such that 2**batch is considered non-negligible.
// For smaller batch, 2**batch is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
__m256 vaccv0 = _mm256_setzero_ps();
__m256 vaccv1 = _mm256_setzero_ps();
__m256 vacce0 = vminus_inf;
__m256 vacce1 = vminus_inf;
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
// Load 96 (12x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
const __m256 vx10 = _mm256_loadu_ps(input + 80);
const __m256 vx11 = _mm256_loadu_ps(input + 88);
input += 96;
// Compute reduced argument batch := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn8 = _mm256_round_ps(_mm256_mul_ps(vx8, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn9 = _mm256_round_ps(_mm256_mul_ps(vx9, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn10 = _mm256_round_ps(_mm256_mul_ps(vx10, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn11 = _mm256_round_ps(_mm256_mul_ps(vx11, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m256 vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_hi, vx10);
__m256 vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_hi, vx11);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_lo, vt10);
vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_lo, vt11);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
__m256 vp10 = _mm256_fmadd_ps(vc5, vt10, vc4);
__m256 vp11 = _mm256_fmadd_ps(vc5, vt11, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc3);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc2);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc1);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc0);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc0);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av + exp2(be) * bv =
// = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
// = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
// = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
//
// For computational efficiency we may add several "extended" floating-point numbers at a time.
__m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
__m256 vmax_e1 = _mm256_max_ps(vacce1, vn1);
vmax_e0 = _mm256_max_ps(vmax_e0, vn2);
vmax_e1 = _mm256_max_ps(vmax_e1, vn3);
vmax_e0 = _mm256_max_ps(vmax_e0, vn4);
vmax_e1 = _mm256_max_ps(vmax_e1, vn5);
vmax_e0 = _mm256_max_ps(vmax_e0, vn6);
vmax_e1 = _mm256_max_ps(vmax_e1, vn7);
vmax_e0 = _mm256_max_ps(vmax_e0, vn8);
vmax_e1 = _mm256_max_ps(vmax_e1, vn9);
vmax_e0 = _mm256_max_ps(vmax_e0, vn10);
vmax_e1 = _mm256_max_ps(vmax_e1, vn11);
// For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum delta_e at -127.0.
// 2. Map delta_e to scale factor 0.0 when delta_e == -127.0
const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_e1), vmin_exponent);
const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e1), vmin_exponent);
const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e0), vmin_exponent);
const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e1), vmin_exponent);
const __m256 vdelta_e4 = _mm256_max_ps(_mm256_sub_ps(vn4, vmax_e0), vmin_exponent);
const __m256 vdelta_e5 = _mm256_max_ps(_mm256_sub_ps(vn5, vmax_e1), vmin_exponent);
const __m256 vdelta_e6 = _mm256_max_ps(_mm256_sub_ps(vn6, vmax_e0), vmin_exponent);
const __m256 vdelta_e7 = _mm256_max_ps(_mm256_sub_ps(vn7, vmax_e1), vmin_exponent);
const __m256 vdelta_e8 = _mm256_max_ps(_mm256_sub_ps(vn8, vmax_e0), vmin_exponent);
const __m256 vdelta_e9 = _mm256_max_ps(_mm256_sub_ps(vn9, vmax_e1), vmin_exponent);
const __m256 vdelta_e10 = _mm256_max_ps(_mm256_sub_ps(vn10, vmax_e0), vmin_exponent);
const __m256 vdelta_e11 = _mm256_max_ps(_mm256_sub_ps(vn11, vmax_e1), vmin_exponent);
// Convert delta-exponents into scale factors:
// - s = exp2(delta_e) when delta_e > -127.0
// - s = 0.0 when delta_e <= -127.0
//
// Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e4, vmagic_bias)), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e5, vmagic_bias)), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e6, vmagic_bias)), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e7, vmagic_bias)), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e8, vmagic_bias)), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e9, vmagic_bias)), 23));
const __m256 vs10 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e10, vmagic_bias)), 23));
const __m256 vs11 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e11, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values
vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
vaccv1 = _mm256_mul_ps(vaccv1, vaccs1);
vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
vaccv1 = _mm256_fmadd_ps(vp1, vs1, vaccv1);
vaccv0 = _mm256_fmadd_ps(vp2, vs2, vaccv0);
vaccv1 = _mm256_fmadd_ps(vp3, vs3, vaccv1);
vaccv0 = _mm256_fmadd_ps(vp4, vs4, vaccv0);
vaccv1 = _mm256_fmadd_ps(vp5, vs5, vaccv1);
vaccv0 = _mm256_fmadd_ps(vp6, vs6, vaccv0);
vaccv1 = _mm256_fmadd_ps(vp7, vs7, vaccv1);
vaccv0 = _mm256_fmadd_ps(vp8, vs8, vaccv0);
vaccv1 = _mm256_fmadd_ps(vp9, vs9, vaccv1);
vaccv0 = _mm256_fmadd_ps(vp10, vs10, vaccv0);
vaccv1 = _mm256_fmadd_ps(vp11, vs11, vaccv1);
vacce0 = vmax_e0;
vacce1 = vmax_e1;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
const __m256 vmax_acce01 = _mm256_max_ps(vacce0, vacce1);
const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_acce01), vmin_exponent);
const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_acce01), vmin_exponent);
const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
__m256 vaccv = _mm256_mul_ps(vaccv0, vaccs0);
vaccv = _mm256_fmadd_ps(vaccv1, vaccs1, vaccv);
__m256 vacce = vmax_acce01;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
// Compute reduced argument batch := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m256 vmax_e = _mm256_max_ps(vacce, vn);
// For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
// Convert exponents into scale factors.
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values.
vaccv = _mm256_mul_ps(vaccv, vaccs);
vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
vacce = vmax_e;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
// Compute reduced argument batch := round(input / log(2)).
__m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Correct reduced argument batch for masked out batch.
vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m256 vmax_e = _mm256_max_ps(vacce, vn);
// For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values.
vaccv = _mm256_mul_ps(vaccv, vaccs);
vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
vacce = vmax_e;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
__m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
vaccv = _mm256_mul_ps(vaccv, vaccs);
__m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));
_mm_store_ss(&sum[0], vaccv_sum);
_mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));
_mm256_zeroupper();
}
| 19,194 | 51.01897 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-x96-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/raddextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// Computes an overflow-safe sum of exponentials ("radd-ext-exp") over a batch
// of floats. The result is reported in an extended floating-point
// representation: sum[0] holds a "mantissa" and sum[1] a base-2 "exponent",
// so that mathematically sum[0] * exp2(sum[1]) == sum(exp(input[i])).
// Keeping the exponent out-of-band lets the reduction succeed even where a
// plain float accumulator would overflow (exp(x) > FLT_MAX) or underflow.
//
// batch - number of input BYTES; must be non-zero and a multiple of sizeof(float).
// input - pointer to the floats to exponentiate and sum.
// sum   - 2-element output: { mantissa, exponent }.
//
// The main loop consumes 96 elements (12 AVX2 vectors of 8 floats) per
// iteration, distributed round-robin over 3 independent accumulator pairs
// (vaccvN = "mantissa", vacceN = "exponent") to hide FMA latency.
void xnn_f32_raddextexp_ukernel__avx2_p5_x96_acc3(
    size_t batch,
    const float* input,
    float* sum)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // log2(e), and -log(2) split into high/low parts (Cody-Waite range reduction).
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);

  // The smallest exponent delta such that 2**delta is considered non-negligible.
  // For smaller deltas, 2**delta is replaced with zero.
  const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
  // Adding this bias to a float exponent value e in [-127, 0] deposits the
  // biased integer exponent in the low bits of the float; shifting those bits
  // left by 23 then forms the IEEE-754 bit pattern of exp2(e) (see the
  // scale-factor computations below).
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  // Accumulator exponents start at -inf, the identity element for max().
  const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);

  // Degree-5 polynomial coefficients for exp(t) on [-log(2)/2, log(2)/2].
  const __m256 vc0 = _mm256_set1_ps(1.0f);
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);

  __m256 vaccv0 = _mm256_setzero_ps();
  __m256 vaccv1 = _mm256_setzero_ps();
  __m256 vaccv2 = _mm256_setzero_ps();
  __m256 vacce0 = vminus_inf;
  __m256 vacce1 = vminus_inf;
  __m256 vacce2 = vminus_inf;
  for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
    // Load 96 (12x8) inputs at a time.
    const __m256 vx0 = _mm256_loadu_ps(input);
    const __m256 vx1 = _mm256_loadu_ps(input + 8);
    const __m256 vx2 = _mm256_loadu_ps(input + 16);
    const __m256 vx3 = _mm256_loadu_ps(input + 24);
    const __m256 vx4 = _mm256_loadu_ps(input + 32);
    const __m256 vx5 = _mm256_loadu_ps(input + 40);
    const __m256 vx6 = _mm256_loadu_ps(input + 48);
    const __m256 vx7 = _mm256_loadu_ps(input + 56);
    const __m256 vx8 = _mm256_loadu_ps(input + 64);
    const __m256 vx9 = _mm256_loadu_ps(input + 72);
    const __m256 vx10 = _mm256_loadu_ps(input + 80);
    const __m256 vx11 = _mm256_loadu_ps(input + 88);
    input += 96;

    // Compute reduced argument n := round(x / log(2)).
    const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn8 = _mm256_round_ps(_mm256_mul_ps(vx8, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn9 = _mm256_round_ps(_mm256_mul_ps(vx9, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn10 = _mm256_round_ps(_mm256_mul_ps(vx10, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn11 = _mm256_round_ps(_mm256_mul_ps(vx11, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
    __m256 vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_hi, vx10);
    __m256 vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_hi, vx11);
    vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
    vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_lo, vt10);
    vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_lo, vt11);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // evaluated via Horner's scheme.
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
    __m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
    __m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
    __m256 vp10 = _mm256_fmadd_ps(vc5, vt10, vc4);
    __m256 vp11 = _mm256_fmadd_ps(vc5, vt11, vc4);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
    vp10 = _mm256_fmadd_ps(vp10, vt10, vc3);
    vp11 = _mm256_fmadd_ps(vp11, vt11, vc3);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
    vp10 = _mm256_fmadd_ps(vp10, vt10, vc2);
    vp11 = _mm256_fmadd_ps(vp11, vt11, vc2);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
    vp10 = _mm256_fmadd_ps(vp10, vt10, vc1);
    vp11 = _mm256_fmadd_ps(vp11, vt11, vc1);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc0);
    vp10 = _mm256_fmadd_ps(vp10, vt10, vc0);
    vp11 = _mm256_fmadd_ps(vp11, vt11, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
    // - vnX is "exponent"
    // - vpX is "mantissa"
    //
    // exp2(ae) * av + exp2(be) * bv =
    //   = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
    //   = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
    //   = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
    //
    // For computational efficiency we may add several "extended" floating-point numbers at a time.
    // The 12 vectors are assigned round-robin to the 3 accumulators:
    // accumulator 0 takes vn0/3/6/9, accumulator 1 takes vn1/4/7/10, accumulator 2 takes vn2/5/8/11.
    __m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
    __m256 vmax_e1 = _mm256_max_ps(vacce1, vn1);
    __m256 vmax_e2 = _mm256_max_ps(vacce2, vn2);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn3);
    vmax_e1 = _mm256_max_ps(vmax_e1, vn4);
    vmax_e2 = _mm256_max_ps(vmax_e2, vn5);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn6);
    vmax_e1 = _mm256_max_ps(vmax_e1, vn7);
    vmax_e2 = _mm256_max_ps(vmax_e2, vn8);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn9);
    vmax_e1 = _mm256_max_ps(vmax_e1, vn10);
    vmax_e2 = _mm256_max_ps(vmax_e2, vn11);

    // For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
    // This replacement is done in two steps:
    // 1. Clamp minimum delta_e at -127.0.
    // 2. Map delta_e to scale factor 0.0 when delta_e == -127.0
    const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
    const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_e1), vmin_exponent);
    const __m256 vdelta_acce2 = _mm256_max_ps(_mm256_sub_ps(vacce2, vmax_e2), vmin_exponent);
    const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
    const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e1), vmin_exponent);
    const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e2), vmin_exponent);
    const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e0), vmin_exponent);
    const __m256 vdelta_e4 = _mm256_max_ps(_mm256_sub_ps(vn4, vmax_e1), vmin_exponent);
    const __m256 vdelta_e5 = _mm256_max_ps(_mm256_sub_ps(vn5, vmax_e2), vmin_exponent);
    const __m256 vdelta_e6 = _mm256_max_ps(_mm256_sub_ps(vn6, vmax_e0), vmin_exponent);
    const __m256 vdelta_e7 = _mm256_max_ps(_mm256_sub_ps(vn7, vmax_e1), vmin_exponent);
    const __m256 vdelta_e8 = _mm256_max_ps(_mm256_sub_ps(vn8, vmax_e2), vmin_exponent);
    const __m256 vdelta_e9 = _mm256_max_ps(_mm256_sub_ps(vn9, vmax_e0), vmin_exponent);
    const __m256 vdelta_e10 = _mm256_max_ps(_mm256_sub_ps(vn10, vmax_e1), vmin_exponent);
    const __m256 vdelta_e11 = _mm256_max_ps(_mm256_sub_ps(vn11, vmax_e2), vmin_exponent);

    // Convert delta-exponents into scale factors:
    // - s = exp2(delta_e) when delta_e > -127.0
    // - s = 0.0 when delta_e <= -127.0
    //
    // The (add magic bias, shift left 23) sequence moves the integer exponent
    // into the IEEE-754 exponent field, producing the float exp2(delta_e).
    // Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
    const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
    const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
    const __m256 vaccs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce2, vmagic_bias)), 23));
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e4, vmagic_bias)), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e5, vmagic_bias)), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e6, vmagic_bias)), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e7, vmagic_bias)), 23));
    const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e8, vmagic_bias)), 23));
    const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e9, vmagic_bias)), 23));
    const __m256 vs10 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e10, vmagic_bias)), 23));
    const __m256 vs11 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e11, vmagic_bias)), 23));

    // Update accumulated "mantissa" and "exponent" values: rescale the old
    // mantissa to the new common exponent, then add the rescaled new terms.
    vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
    vaccv1 = _mm256_mul_ps(vaccv1, vaccs1);
    vaccv2 = _mm256_mul_ps(vaccv2, vaccs2);
    vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp1, vs1, vaccv1);
    vaccv2 = _mm256_fmadd_ps(vp2, vs2, vaccv2);
    vaccv0 = _mm256_fmadd_ps(vp3, vs3, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp4, vs4, vaccv1);
    vaccv2 = _mm256_fmadd_ps(vp5, vs5, vaccv2);
    vaccv0 = _mm256_fmadd_ps(vp6, vs6, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp7, vs7, vaccv1);
    vaccv2 = _mm256_fmadd_ps(vp8, vs8, vaccv2);
    vaccv0 = _mm256_fmadd_ps(vp9, vs9, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp10, vs10, vaccv1);
    vaccv2 = _mm256_fmadd_ps(vp11, vs11, vaccv2);

    vacce0 = vmax_e0;
    vacce1 = vmax_e1;
    vacce2 = vmax_e2;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums:
  // bring all 3 accumulators to one common per-lane exponent and fold their mantissas together.
  const __m256 vmax_acce01 = _mm256_max_ps(vacce0, vacce1);
  const __m256 vmax_acce2 = vacce2;
  const __m256 vmax_acce012 = _mm256_max_ps(vmax_acce01, vmax_acce2);
  const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_acce012), vmin_exponent);
  const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_acce012), vmin_exponent);
  const __m256 vdelta_acce2 = _mm256_max_ps(_mm256_sub_ps(vacce2, vmax_acce012), vmin_exponent);
  const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
  const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
  const __m256 vaccs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce2, vmagic_bias)), 23));
  __m256 vaccv = _mm256_mul_ps(vaccv0, vaccs0);
  vaccv = _mm256_fmadd_ps(vaccv1, vaccs1, vaccv);
  vaccv = _mm256_fmadd_ps(vaccv2, vaccs2, vaccv);
  __m256 vacce = vmax_acce012;

  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 inputs at a time.
    const __m256 vx = _mm256_loadu_ps(input);
    input += 8;

    // Compute reduced argument n := round(x / log(2)).
    const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vp = _mm256_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m256 vmax_e = _mm256_max_ps(vacce, vn);
    // For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
    const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
    const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
    // Convert exponents into scale factors.
    const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
    // Update accumulated "mantissa" and "exponent" values.
    vaccv = _mm256_mul_ps(vaccv, vaccs);
    vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // mask_table yields all-ones (-1) for valid lanes and 0 for lanes past the
    // end of the input; the offset selects `batch / sizeof(float)` valid lanes.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
    // Load up to 7 inputs at a time.
    const __m256 vx = _mm256_maskload_ps(input, vmask);

    // Compute reduced argument n := round(x / log(2)).
    __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Correct reduced argument n for masked-out lanes (so they do not perturb the running maximum exponent).
    vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vp = _mm256_fmadd_ps(vp, vt, vc0);
    // Zero the "mantissa" of masked-out lanes so they contribute nothing to the sum.
    vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m256 vmax_e = _mm256_max_ps(vacce, vn);
    // For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
    const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
    const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
    // Convert exponents into scale factors.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
    const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
    // Update accumulated "mantissa" and "exponent" values.
    vaccv = _mm256_mul_ps(vaccv, vaccs);
    vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
    vacce = vmax_e;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
  // First compute the maximum exponent across all 8 lanes (cross-128-bit permute
  // followed by two in-lane shuffles broadcasts the lane-wise maximum), ...
  __m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
  vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
  vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
  const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
  const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
  // ... then rescale every lane's mantissa to that common exponent and add the lanes horizontally.
  vaccv = _mm256_mul_ps(vaccv, vaccs);
  __m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
  vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
  vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));

  // Emit the extended result: sum[0] * 2**sum[1] is the mathematical total.
  _mm_store_ss(&sum[0], vaccv_sum);
  _mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));

  // Clear the upper halves of the YMM registers to avoid AVX/SSE transition penalties.
  _mm256_zeroupper();
}
| 19,949 | 51.5 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-x96-acc6.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/raddextexp.h>
// Mask table for the AVX masked load of the 1..7-element remainder:
// loading 8 int32 lanes starting at ((uintptr_t) &mask_table[7] - batch) yields
// batch/sizeof(float) leading all-ones (load-enabled) lanes followed by zero lanes.
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
// Computes the sum of exp(input[i]) over batch/sizeof(float) elements in an
// "extended" floating-point representation that avoids overflow/underflow:
//   total = sum[0] * 2**sum[1]
// where sum[0] is the accumulated "mantissa" and sum[1] the shared "exponent".
// AVX2+FMA kernel: 96 elements (12x8) per main-loop iteration, 6 parallel accumulators.
void xnn_f32_raddextexp_ukernel__avx2_p5_x96_acc6(
    size_t batch,  // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* sum)  // output: sum[0] = mantissa, sum[1] = exponent
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);
  // log2(e), and log(2) split into high/low parts for Cody-Waite range reduction.
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest delta-exponent e such that 2**e is considered non-negligible.
  // Smaller delta-exponents are clamped here and later mapped to a 0.0 scale factor.
  const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
  const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);
  // Coefficients of degree-5 polynomial approximation of exp(t) on [-log(2)/2, log(2)/2].
  const __m256 vc0 = _mm256_set1_ps(1.0f);
  const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
  const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
  const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
  const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
  // 6 independent ("mantissa", "exponent") accumulator pairs; starting exponent
  // -inf makes the initial accumulator value represent 0.0.
  __m256 vaccv0 = _mm256_setzero_ps();
  __m256 vaccv1 = _mm256_setzero_ps();
  __m256 vaccv2 = _mm256_setzero_ps();
  __m256 vaccv3 = _mm256_setzero_ps();
  __m256 vaccv4 = _mm256_setzero_ps();
  __m256 vaccv5 = _mm256_setzero_ps();
  __m256 vacce0 = vminus_inf;
  __m256 vacce1 = vminus_inf;
  __m256 vacce2 = vminus_inf;
  __m256 vacce3 = vminus_inf;
  __m256 vacce4 = vminus_inf;
  __m256 vacce5 = vminus_inf;
  for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
    // Load 96 (12x8) inputs at a time.
    const __m256 vx0 = _mm256_loadu_ps(input);
    const __m256 vx1 = _mm256_loadu_ps(input + 8);
    const __m256 vx2 = _mm256_loadu_ps(input + 16);
    const __m256 vx3 = _mm256_loadu_ps(input + 24);
    const __m256 vx4 = _mm256_loadu_ps(input + 32);
    const __m256 vx5 = _mm256_loadu_ps(input + 40);
    const __m256 vx6 = _mm256_loadu_ps(input + 48);
    const __m256 vx7 = _mm256_loadu_ps(input + 56);
    const __m256 vx8 = _mm256_loadu_ps(input + 64);
    const __m256 vx9 = _mm256_loadu_ps(input + 72);
    const __m256 vx10 = _mm256_loadu_ps(input + 80);
    const __m256 vx11 = _mm256_loadu_ps(input + 88);
    input += 96;
    // Compute reduced argument n := round(input / log(2)).
    const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn8 = _mm256_round_ps(_mm256_mul_ps(vx8, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn9 = _mm256_round_ps(_mm256_mul_ps(vx9, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn10 = _mm256_round_ps(_mm256_mul_ps(vx10, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    const __m256 vn11 = _mm256_round_ps(_mm256_mul_ps(vx11, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
    __m256 vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_hi, vx10);
    __m256 vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_hi, vx11);
    vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
    vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_lo, vt10);
    vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_lo, vt11);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // via Horner's scheme.
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
    __m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
    __m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
    __m256 vp10 = _mm256_fmadd_ps(vc5, vt10, vc4);
    __m256 vp11 = _mm256_fmadd_ps(vc5, vt11, vc4);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
    vp10 = _mm256_fmadd_ps(vp10, vt10, vc3);
    vp11 = _mm256_fmadd_ps(vp11, vt11, vc3);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
    vp10 = _mm256_fmadd_ps(vp10, vt10, vc2);
    vp11 = _mm256_fmadd_ps(vp11, vt11, vc2);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
    vp10 = _mm256_fmadd_ps(vp10, vt10, vc1);
    vp11 = _mm256_fmadd_ps(vp11, vt11, vc1);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm256_fmadd_ps(vp9, vt9, vc0);
    vp10 = _mm256_fmadd_ps(vp10, vt10, vc0);
    vp11 = _mm256_fmadd_ps(vp11, vt11, vc0);
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
    //  - vnX is "exponent"
    //  - vpX is "mantissa"
    //
    //  exp2(ae) * av + exp2(be) * bv =
    //    = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
    //    = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
    //    = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
    //
    // For computational efficiency we may add several "extended" floating-point numbers at a time.
    // Each of the 6 accumulators absorbs two of the 12 vectors (X and X+6).
    __m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
    __m256 vmax_e1 = _mm256_max_ps(vacce1, vn1);
    __m256 vmax_e2 = _mm256_max_ps(vacce2, vn2);
    __m256 vmax_e3 = _mm256_max_ps(vacce3, vn3);
    __m256 vmax_e4 = _mm256_max_ps(vacce4, vn4);
    __m256 vmax_e5 = _mm256_max_ps(vacce5, vn5);
    vmax_e0 = _mm256_max_ps(vmax_e0, vn6);
    vmax_e1 = _mm256_max_ps(vmax_e1, vn7);
    vmax_e2 = _mm256_max_ps(vmax_e2, vn8);
    vmax_e3 = _mm256_max_ps(vmax_e3, vn9);
    vmax_e4 = _mm256_max_ps(vmax_e4, vn10);
    vmax_e5 = _mm256_max_ps(vmax_e5, vn11);
    // For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
    // This replacement is done in two steps:
    // 1. Clamp minimum delta_e at -127.0.
    // 2. Map delta_e to scale factor 0.0 when delta_e == -127.0
    const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
    const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_e1), vmin_exponent);
    const __m256 vdelta_acce2 = _mm256_max_ps(_mm256_sub_ps(vacce2, vmax_e2), vmin_exponent);
    const __m256 vdelta_acce3 = _mm256_max_ps(_mm256_sub_ps(vacce3, vmax_e3), vmin_exponent);
    const __m256 vdelta_acce4 = _mm256_max_ps(_mm256_sub_ps(vacce4, vmax_e4), vmin_exponent);
    const __m256 vdelta_acce5 = _mm256_max_ps(_mm256_sub_ps(vacce5, vmax_e5), vmin_exponent);
    const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
    const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e1), vmin_exponent);
    const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e2), vmin_exponent);
    const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e3), vmin_exponent);
    const __m256 vdelta_e4 = _mm256_max_ps(_mm256_sub_ps(vn4, vmax_e4), vmin_exponent);
    const __m256 vdelta_e5 = _mm256_max_ps(_mm256_sub_ps(vn5, vmax_e5), vmin_exponent);
    const __m256 vdelta_e6 = _mm256_max_ps(_mm256_sub_ps(vn6, vmax_e0), vmin_exponent);
    const __m256 vdelta_e7 = _mm256_max_ps(_mm256_sub_ps(vn7, vmax_e1), vmin_exponent);
    const __m256 vdelta_e8 = _mm256_max_ps(_mm256_sub_ps(vn8, vmax_e2), vmin_exponent);
    const __m256 vdelta_e9 = _mm256_max_ps(_mm256_sub_ps(vn9, vmax_e3), vmin_exponent);
    const __m256 vdelta_e10 = _mm256_max_ps(_mm256_sub_ps(vn10, vmax_e4), vmin_exponent);
    const __m256 vdelta_e11 = _mm256_max_ps(_mm256_sub_ps(vn11, vmax_e5), vmin_exponent);
    // Convert delta-exponents into scale factors:
    // - s = exp2(delta_e) when delta_e > -127.0
    // - s = 0.0 when delta_e <= -127.0
    //
    // Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
    // exp2 is computed by sliding the magic-biased exponent into the float exponent field (<< 23).
    const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
    const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
    const __m256 vaccs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce2, vmagic_bias)), 23));
    const __m256 vaccs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce3, vmagic_bias)), 23));
    const __m256 vaccs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce4, vmagic_bias)), 23));
    const __m256 vaccs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce5, vmagic_bias)), 23));
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e4, vmagic_bias)), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e5, vmagic_bias)), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e6, vmagic_bias)), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e7, vmagic_bias)), 23));
    const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e8, vmagic_bias)), 23));
    const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e9, vmagic_bias)), 23));
    const __m256 vs10 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e10, vmagic_bias)), 23));
    const __m256 vs11 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e11, vmagic_bias)), 23));
    // Update accumulated "mantissa" and "exponent" values
    vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
    vaccv1 = _mm256_mul_ps(vaccv1, vaccs1);
    vaccv2 = _mm256_mul_ps(vaccv2, vaccs2);
    vaccv3 = _mm256_mul_ps(vaccv3, vaccs3);
    vaccv4 = _mm256_mul_ps(vaccv4, vaccs4);
    vaccv5 = _mm256_mul_ps(vaccv5, vaccs5);
    vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp1, vs1, vaccv1);
    vaccv2 = _mm256_fmadd_ps(vp2, vs2, vaccv2);
    vaccv3 = _mm256_fmadd_ps(vp3, vs3, vaccv3);
    vaccv4 = _mm256_fmadd_ps(vp4, vs4, vaccv4);
    vaccv5 = _mm256_fmadd_ps(vp5, vs5, vaccv5);
    vaccv0 = _mm256_fmadd_ps(vp6, vs6, vaccv0);
    vaccv1 = _mm256_fmadd_ps(vp7, vs7, vaccv1);
    vaccv2 = _mm256_fmadd_ps(vp8, vs8, vaccv2);
    vaccv3 = _mm256_fmadd_ps(vp9, vs9, vaccv3);
    vaccv4 = _mm256_fmadd_ps(vp10, vs10, vaccv4);
    vaccv5 = _mm256_fmadd_ps(vp11, vs11, vaccv5);
    vacce0 = vmax_e0;
    vacce1 = vmax_e1;
    vacce2 = vmax_e2;
    vacce3 = vmax_e3;
    vacce4 = vmax_e4;
    vacce5 = vmax_e5;
  }
  // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums:
  // rescale each of the 6 accumulators to the common (elementwise maximum) exponent and add.
  const __m256 vmax_acce01 = _mm256_max_ps(vacce0, vacce1);
  const __m256 vmax_acce23 = _mm256_max_ps(vacce2, vacce3);
  const __m256 vmax_acce45 = _mm256_max_ps(vacce4, vacce5);
  const __m256 vmax_acce0123 = _mm256_max_ps(vmax_acce01, vmax_acce23);
  const __m256 vmax_acce012345 = _mm256_max_ps(vmax_acce0123, vmax_acce45);
  const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_acce012345), vmin_exponent);
  const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_acce012345), vmin_exponent);
  const __m256 vdelta_acce2 = _mm256_max_ps(_mm256_sub_ps(vacce2, vmax_acce012345), vmin_exponent);
  const __m256 vdelta_acce3 = _mm256_max_ps(_mm256_sub_ps(vacce3, vmax_acce012345), vmin_exponent);
  const __m256 vdelta_acce4 = _mm256_max_ps(_mm256_sub_ps(vacce4, vmax_acce012345), vmin_exponent);
  const __m256 vdelta_acce5 = _mm256_max_ps(_mm256_sub_ps(vacce5, vmax_acce012345), vmin_exponent);
  const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
  const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
  const __m256 vaccs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce2, vmagic_bias)), 23));
  const __m256 vaccs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce3, vmagic_bias)), 23));
  const __m256 vaccs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce4, vmagic_bias)), 23));
  const __m256 vaccs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce5, vmagic_bias)), 23));
  __m256 vaccv = _mm256_mul_ps(vaccv0, vaccs0);
  vaccv = _mm256_fmadd_ps(vaccv1, vaccs1, vaccv);
  vaccv = _mm256_fmadd_ps(vaccv2, vaccs2, vaccv);
  vaccv = _mm256_fmadd_ps(vaccv3, vaccs3, vaccv);
  vaccv = _mm256_fmadd_ps(vaccv4, vaccs4, vaccv);
  vaccv = _mm256_fmadd_ps(vaccv5, vaccs5, vaccv);
  __m256 vacce = vmax_acce012345;
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 inputs at a time.
    const __m256 vx = _mm256_loadu_ps(input);
    input += 8;
    // Compute reduced argument n := round(input / log(2)).
    const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vp = _mm256_fmadd_ps(vp, vt, vc0);
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m256 vmax_e = _mm256_max_ps(vacce, vn);
    // For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
    const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
    const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
    // Convert exponents into scale factors.
    const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
    // Update accumulated "mantissa" and "exponent" values.
    vaccv = _mm256_mul_ps(vaccv, vaccs);
    vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Select a mask with batch/sizeof(float) leading all-ones lanes (see mask_table).
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
    // Load up to 7 inputs at a time.
    const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument n := round(input / log(2)).
    __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Correct reduced argument n for masked-out lanes (use the accumulator exponent so they contribute nothing new).
    vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vp = _mm256_fmadd_ps(vp, vt, vc0);
    // Zero the "mantissa" of masked-out lanes so they do not affect the sum.
    vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m256 vmax_e = _mm256_max_ps(vacce, vn);
    // For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
    const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
    const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
    // Convert exponents into scale factors.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
    const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
    // Update accumulated "mantissa" and "exponent" values.
    vaccv = _mm256_mul_ps(vaccv, vaccs);
    vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
    vacce = vmax_e;
  }
  // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum:
  // first broadcast the maximum exponent across all 8 lanes, ...
  __m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
  vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
  vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
  const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
  const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
  // ... then rescale the mantissas to that common exponent and sum them horizontally.
  vaccv = _mm256_mul_ps(vaccv, vaccs);
  __m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
  vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
  vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));
  // sum[0] := mantissa, sum[1] := exponent (lane 0 of the broadcast maximum).
  _mm_store_ss(&sum[0], vaccv_sum);
  _mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));
  // Clear upper YMM halves to avoid AVX-SSE transition penalties in the caller.
  _mm256_zeroupper();
}
| 22,070 | 52.963325 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/raddextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_raddextexp_ukernel__avx2_p5_x96(
size_t batch,
const float* input,
float* sum)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
// The smallest batch such that 2**batch is considered non-negligible.
// For smaller batch, 2**batch is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
__m256 vaccv0 = _mm256_setzero_ps();
__m256 vacce0 = vminus_inf;
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
// Load 96 (12x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
const __m256 vx10 = _mm256_loadu_ps(input + 80);
const __m256 vx11 = _mm256_loadu_ps(input + 88);
input += 96;
// Compute reduced argument batch := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn8 = _mm256_round_ps(_mm256_mul_ps(vx8, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn9 = _mm256_round_ps(_mm256_mul_ps(vx9, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn10 = _mm256_round_ps(_mm256_mul_ps(vx10, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn11 = _mm256_round_ps(_mm256_mul_ps(vx11, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m256 vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_hi, vx10);
__m256 vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_hi, vx11);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_lo, vt10);
vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_lo, vt11);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
__m256 vp10 = _mm256_fmadd_ps(vc5, vt10, vc4);
__m256 vp11 = _mm256_fmadd_ps(vc5, vt11, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc3);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc2);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc1);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc0);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc0);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av + exp2(be) * bv =
// = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
// = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
// = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
//
// For computational efficiency we may add several "extended" floating-point numbers at a time.
__m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
vmax_e0 = _mm256_max_ps(vmax_e0, vn1);
vmax_e0 = _mm256_max_ps(vmax_e0, vn2);
vmax_e0 = _mm256_max_ps(vmax_e0, vn3);
vmax_e0 = _mm256_max_ps(vmax_e0, vn4);
vmax_e0 = _mm256_max_ps(vmax_e0, vn5);
vmax_e0 = _mm256_max_ps(vmax_e0, vn6);
vmax_e0 = _mm256_max_ps(vmax_e0, vn7);
vmax_e0 = _mm256_max_ps(vmax_e0, vn8);
vmax_e0 = _mm256_max_ps(vmax_e0, vn9);
vmax_e0 = _mm256_max_ps(vmax_e0, vn10);
vmax_e0 = _mm256_max_ps(vmax_e0, vn11);
// For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum delta_e at -127.0.
// 2. Map delta_e to scale factor 0.0 when delta_e == -127.0
const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e0), vmin_exponent);
const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e0), vmin_exponent);
const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e0), vmin_exponent);
const __m256 vdelta_e4 = _mm256_max_ps(_mm256_sub_ps(vn4, vmax_e0), vmin_exponent);
const __m256 vdelta_e5 = _mm256_max_ps(_mm256_sub_ps(vn5, vmax_e0), vmin_exponent);
const __m256 vdelta_e6 = _mm256_max_ps(_mm256_sub_ps(vn6, vmax_e0), vmin_exponent);
const __m256 vdelta_e7 = _mm256_max_ps(_mm256_sub_ps(vn7, vmax_e0), vmin_exponent);
const __m256 vdelta_e8 = _mm256_max_ps(_mm256_sub_ps(vn8, vmax_e0), vmin_exponent);
const __m256 vdelta_e9 = _mm256_max_ps(_mm256_sub_ps(vn9, vmax_e0), vmin_exponent);
const __m256 vdelta_e10 = _mm256_max_ps(_mm256_sub_ps(vn10, vmax_e0), vmin_exponent);
const __m256 vdelta_e11 = _mm256_max_ps(_mm256_sub_ps(vn11, vmax_e0), vmin_exponent);
// Convert delta-exponents into scale factors:
// - s = exp2(delta_e) when delta_e > -127.0
// - s = 0.0 when delta_e <= -127.0
//
// Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e4, vmagic_bias)), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e5, vmagic_bias)), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e6, vmagic_bias)), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e7, vmagic_bias)), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e8, vmagic_bias)), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e9, vmagic_bias)), 23));
const __m256 vs10 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e10, vmagic_bias)), 23));
const __m256 vs11 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e11, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values
vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp1, vs1, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp2, vs2, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp3, vs3, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp4, vs4, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp5, vs5, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp6, vs6, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp7, vs7, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp8, vs8, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp9, vs9, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp10, vs10, vaccv0);
vaccv0 = _mm256_fmadd_ps(vp11, vs11, vaccv0);
vacce0 = vmax_e0;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
__m256 vaccv = vaccv0;
__m256 vacce = vacce0;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
// Compute reduced argument batch := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m256 vmax_e = _mm256_max_ps(vacce, vn);
// For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
// Convert exponents into scale factors.
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values.
vaccv = _mm256_mul_ps(vaccv, vaccs);
vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
vacce = vmax_e;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
// Compute reduced argument batch := round(input / log(2)).
__m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Correct reduced argument batch for masked out batch.
vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m256 vmax_e = _mm256_max_ps(vacce, vn);
// For computational efficiency, clamp minimum exp2(delta_e) at -127.0. It will be mapped to 0.0 scale factor later.
const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
// Update accumulated "mantissa" and "exponent" values.
vaccv = _mm256_mul_ps(vaccv, vaccs);
vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
vacce = vmax_e;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
__m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
vaccv = _mm256_mul_ps(vaccv, vaccs);
__m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));
_mm_store_ss(&sum[0], vaccv_sum);
_mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));
_mm256_zeroupper();
}
| 18,226 | 50.488701 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-x128-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddextexp.h>
// Computes a sum of exponentials, sum(exp(input[i])), over a batch of floats
// in an "extended" floating-point representation to avoid overflow/underflow:
// on return the total equals sum[0] * exp2(sum[1]), where sum[0] is the
// accumulated "mantissa" and sum[1] the accumulated base-2 "exponent".
//
// The main loop consumes 128 floats (8 ZMM vectors) per iteration and keeps
// 2 independent (mantissa, exponent) accumulator pairs ("acc2" variant) to
// shorten dependency chains; leftovers are processed 16 at a time, and the
// final 1..15 elements via a masked load.
void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x128_acc2(
    size_t batch,         // size of input in BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* sum)           // out: sum[0] = mantissa, sum[1] = base-2 exponent
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // log2(e), and -log(2) split into high/low parts for Cody-Waite range reduction.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);

  // Coefficients of a degree-5 polynomial approximation of exp(t) on [-log(2)/2, log(2)/2].
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);

  const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);

  // Accumulators: vaccvN are "mantissa" partial sums, vacceN the matching
  // "exponents". Exponents start at -inf so the first max() adopts the data.
  __m512 vaccv0 = _mm512_setzero_ps();
  __m512 vaccv1 = _mm512_setzero_ps();
  __m512 vacce0 = vminus_inf;
  __m512 vacce1 = vminus_inf;
  for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
    // Load 128 (8x16) inputs at a time.
    const __m512 vx0 = _mm512_loadu_ps(input);
    const __m512 vx1 = _mm512_loadu_ps(input + 16);
    const __m512 vx2 = _mm512_loadu_ps(input + 32);
    const __m512 vx3 = _mm512_loadu_ps(input + 48);
    const __m512 vx4 = _mm512_loadu_ps(input + 64);
    const __m512 vx5 = _mm512_loadu_ps(input + 80);
    const __m512 vx6 = _mm512_loadu_ps(input + 96);
    const __m512 vx7 = _mm512_loadu_ps(input + 112);
    input += 128;

    // Compute reduced argument batch := round(input / log(2)).
    // (roundscale with imm 0 rounds to nearest integer, keeping full precision.)
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // via Horner's scheme.
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
    //  - vnX is "exponent"
    //  - vpX is "mantissa"
    //
    //  exp2(ae) * av + exp2(be) * bv =
    //    = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
    //    = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
    //    = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
    //
    // For computational efficiency we add three "extended" floating-point numbers at a time.
    // Even-numbered vn vectors fold into accumulator pair 0, odd-numbered into pair 1.
    __m512 vmax_e0 = _mm512_max_ps(vacce0, vn0);
    __m512 vmax_e1 = _mm512_max_ps(vacce1, vn1);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn2);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn3);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn4);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn5);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn6);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn7);

    // Delta-exponents relative to the new per-accumulator maximum exponent.
    const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_e0);
    const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_e1);
    const __m512 vdelta_e0 = _mm512_sub_ps(vn0, vmax_e0);
    const __m512 vdelta_e1 = _mm512_sub_ps(vn1, vmax_e1);
    const __m512 vdelta_e2 = _mm512_sub_ps(vn2, vmax_e0);
    const __m512 vdelta_e3 = _mm512_sub_ps(vn3, vmax_e1);
    const __m512 vdelta_e4 = _mm512_sub_ps(vn4, vmax_e0);
    const __m512 vdelta_e5 = _mm512_sub_ps(vn5, vmax_e1);
    const __m512 vdelta_e6 = _mm512_sub_ps(vn6, vmax_e0);
    const __m512 vdelta_e7 = _mm512_sub_ps(vn7, vmax_e1);

    // Update accumulated "mantissa" and "exponent" values.
    // scalef(x, e) computes x * exp2(e), rescaling mantissas to the common exponent.
    vaccv0 = _mm512_scalef_ps(vaccv0, vdelta_acce0);
    vaccv1 = _mm512_scalef_ps(vaccv1, vdelta_acce1);
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp0, vdelta_e0));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp1, vdelta_e1));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp2, vdelta_e2));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp3, vdelta_e3));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp4, vdelta_e4));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp5, vdelta_e5));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp6, vdelta_e6));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp7, vdelta_e7));

    vacce0 = vmax_e0;
    vacce1 = vmax_e1;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums:
  // rescale both accumulator pairs to the elementwise-max exponent and add.
  const __m512 vmax_acce01 = _mm512_max_ps(vacce0, vacce1);
  const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_acce01);
  const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_acce01);
  __m512 vaccv = _mm512_scalef_ps(vaccv0, vdelta_acce0);
  vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv1, vdelta_acce1));
  __m512 vacce = vmax_acce01;

  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vx = _mm512_loadu_ps(input);
    input += 16;

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m512 vmax_e = _mm512_max_ps(vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_scalef_ps(vaccv, vdelta_acce);
    vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, vdelta_e));
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    // Load up to 15 inputs at a time; masked-off lanes read as +0.0f.
    const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    // Masked intrinsics leave the masked-off lanes of the accumulators untouched.
    const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, vdelta_acce);
    vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, vdelta_e));
    vacce = vmax_e;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum:
  // bring all 16 lanes to the common maximum exponent, then add them up.
  const float vmax_acce = _mm512_reduce_max_ps(vacce);
  const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));

  sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
  sum[1] = vmax_acce;

  _mm256_zeroupper();  // clear upper YMM state before returning to non-AVX code
}
| 11,365 | 42.547893 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-x128-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddextexp.h>
// Computes a sum of exponentials, sum(exp(input[i])), over a batch of floats
// in an "extended" floating-point representation to avoid overflow/underflow:
// on return the total equals sum[0] * exp2(sum[1]), where sum[0] is the
// accumulated "mantissa" and sum[1] the accumulated base-2 "exponent".
//
// The main loop consumes 128 floats (8 ZMM vectors) per iteration and keeps
// 4 independent (mantissa, exponent) accumulator pairs ("acc4" variant) to
// shorten dependency chains; leftovers are processed 16 at a time, and the
// final 1..15 elements via a masked load.
void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x128_acc4(
    size_t batch,         // size of input in BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* sum)           // out: sum[0] = mantissa, sum[1] = base-2 exponent
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // log2(e), and -log(2) split into high/low parts for Cody-Waite range reduction.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);

  // Coefficients of a degree-5 polynomial approximation of exp(t) on [-log(2)/2, log(2)/2].
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);

  const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);

  // Accumulators: vaccvN are "mantissa" partial sums, vacceN the matching
  // "exponents". Exponents start at -inf so the first max() adopts the data.
  __m512 vaccv0 = _mm512_setzero_ps();
  __m512 vaccv1 = _mm512_setzero_ps();
  __m512 vaccv2 = _mm512_setzero_ps();
  __m512 vaccv3 = _mm512_setzero_ps();
  __m512 vacce0 = vminus_inf;
  __m512 vacce1 = vminus_inf;
  __m512 vacce2 = vminus_inf;
  __m512 vacce3 = vminus_inf;
  for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
    // Load 128 (8x16) inputs at a time.
    const __m512 vx0 = _mm512_loadu_ps(input);
    const __m512 vx1 = _mm512_loadu_ps(input + 16);
    const __m512 vx2 = _mm512_loadu_ps(input + 32);
    const __m512 vx3 = _mm512_loadu_ps(input + 48);
    const __m512 vx4 = _mm512_loadu_ps(input + 64);
    const __m512 vx5 = _mm512_loadu_ps(input + 80);
    const __m512 vx6 = _mm512_loadu_ps(input + 96);
    const __m512 vx7 = _mm512_loadu_ps(input + 112);
    input += 128;

    // Compute reduced argument batch := round(input / log(2)).
    // (roundscale with imm 0 rounds to nearest integer, keeping full precision.)
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // via Horner's scheme.
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
    //  - vnX is "exponent"
    //  - vpX is "mantissa"
    //
    //  exp2(ae) * av + exp2(be) * bv =
    //    = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
    //    = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
    //    = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
    //
    // For computational efficiency we add three "extended" floating-point numbers at a time.
    // Vector k (k = 0..7) folds into accumulator pair k % 4.
    __m512 vmax_e0 = _mm512_max_ps(vacce0, vn0);
    __m512 vmax_e1 = _mm512_max_ps(vacce1, vn1);
    __m512 vmax_e2 = _mm512_max_ps(vacce2, vn2);
    __m512 vmax_e3 = _mm512_max_ps(vacce3, vn3);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn4);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn5);
    vmax_e2 = _mm512_max_ps(vmax_e2, vn6);
    vmax_e3 = _mm512_max_ps(vmax_e3, vn7);

    // Delta-exponents relative to the new per-accumulator maximum exponent.
    const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_e0);
    const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_e1);
    const __m512 vdelta_acce2 = _mm512_sub_ps(vacce2, vmax_e2);
    const __m512 vdelta_acce3 = _mm512_sub_ps(vacce3, vmax_e3);
    const __m512 vdelta_e0 = _mm512_sub_ps(vn0, vmax_e0);
    const __m512 vdelta_e1 = _mm512_sub_ps(vn1, vmax_e1);
    const __m512 vdelta_e2 = _mm512_sub_ps(vn2, vmax_e2);
    const __m512 vdelta_e3 = _mm512_sub_ps(vn3, vmax_e3);
    const __m512 vdelta_e4 = _mm512_sub_ps(vn4, vmax_e0);
    const __m512 vdelta_e5 = _mm512_sub_ps(vn5, vmax_e1);
    const __m512 vdelta_e6 = _mm512_sub_ps(vn6, vmax_e2);
    const __m512 vdelta_e7 = _mm512_sub_ps(vn7, vmax_e3);

    // Update accumulated "mantissa" and "exponent" values.
    // scalef(x, e) computes x * exp2(e), rescaling mantissas to the common exponent.
    vaccv0 = _mm512_scalef_ps(vaccv0, vdelta_acce0);
    vaccv1 = _mm512_scalef_ps(vaccv1, vdelta_acce1);
    vaccv2 = _mm512_scalef_ps(vaccv2, vdelta_acce2);
    vaccv3 = _mm512_scalef_ps(vaccv3, vdelta_acce3);
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp0, vdelta_e0));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp1, vdelta_e1));
    vaccv2 = _mm512_add_ps(vaccv2, _mm512_scalef_ps(vp2, vdelta_e2));
    vaccv3 = _mm512_add_ps(vaccv3, _mm512_scalef_ps(vp3, vdelta_e3));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp4, vdelta_e4));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp5, vdelta_e5));
    vaccv2 = _mm512_add_ps(vaccv2, _mm512_scalef_ps(vp6, vdelta_e6));
    vaccv3 = _mm512_add_ps(vaccv3, _mm512_scalef_ps(vp7, vdelta_e7));

    vacce0 = vmax_e0;
    vacce1 = vmax_e1;
    vacce2 = vmax_e2;
    vacce3 = vmax_e3;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums:
  // rescale all four accumulator pairs to the elementwise-max exponent and add.
  const __m512 vmax_acce01 = _mm512_max_ps(vacce0, vacce1);
  const __m512 vmax_acce23 = _mm512_max_ps(vacce2, vacce3);
  const __m512 vmax_acce0123 = _mm512_max_ps(vmax_acce01, vmax_acce23);
  const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_acce0123);
  const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_acce0123);
  const __m512 vdelta_acce2 = _mm512_sub_ps(vacce2, vmax_acce0123);
  const __m512 vdelta_acce3 = _mm512_sub_ps(vacce3, vmax_acce0123);
  __m512 vaccv = _mm512_scalef_ps(vaccv0, vdelta_acce0);
  vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv1, vdelta_acce1));
  vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv2, vdelta_acce2));
  vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv3, vdelta_acce3));
  __m512 vacce = vmax_acce0123;

  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vx = _mm512_loadu_ps(input);
    input += 16;

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m512 vmax_e = _mm512_max_ps(vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_scalef_ps(vaccv, vdelta_acce);
    vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, vdelta_e));
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    // Load up to 15 inputs at a time; masked-off lanes read as +0.0f.
    const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    // Masked intrinsics leave the masked-off lanes of the accumulators untouched.
    const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, vdelta_acce);
    vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, vdelta_e));
    vacce = vmax_e;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum:
  // bring all 16 lanes to the common maximum exponent, then add them up.
  const float vmax_acce = _mm512_reduce_max_ps(vacce);
  const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));

  sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
  sum[1] = vmax_acce;

  _mm256_zeroupper();  // clear upper YMM state before returning to non-AVX code
}
| 12,211 | 43.086643 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-x128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddextexp.h>
void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x128(
size_t batch,
const float* input,
float* sum)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);
__m512 vaccv0 = _mm512_setzero_ps();
__m512 vacce0 = vminus_inf;
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
// Load 128 (8x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av + exp2(be) * bv =
// = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
// = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
// = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
//
// For computational efficiency we add three "extended" floating-point numbers at a time.
__m512 vmax_e0 = _mm512_max_ps(vacce0, vn0);
vmax_e0 = _mm512_max_ps(vmax_e0, vn1);
vmax_e0 = _mm512_max_ps(vmax_e0, vn2);
vmax_e0 = _mm512_max_ps(vmax_e0, vn3);
vmax_e0 = _mm512_max_ps(vmax_e0, vn4);
vmax_e0 = _mm512_max_ps(vmax_e0, vn5);
vmax_e0 = _mm512_max_ps(vmax_e0, vn6);
vmax_e0 = _mm512_max_ps(vmax_e0, vn7);
const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_e0);
const __m512 vdelta_e0 = _mm512_sub_ps(vn0, vmax_e0);
const __m512 vdelta_e1 = _mm512_sub_ps(vn1, vmax_e0);
const __m512 vdelta_e2 = _mm512_sub_ps(vn2, vmax_e0);
const __m512 vdelta_e3 = _mm512_sub_ps(vn3, vmax_e0);
const __m512 vdelta_e4 = _mm512_sub_ps(vn4, vmax_e0);
const __m512 vdelta_e5 = _mm512_sub_ps(vn5, vmax_e0);
const __m512 vdelta_e6 = _mm512_sub_ps(vn6, vmax_e0);
const __m512 vdelta_e7 = _mm512_sub_ps(vn7, vmax_e0);
// Update accumulated "mantissa" and "exponent" values
vaccv0 = _mm512_scalef_ps(vaccv0, vdelta_acce0);
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp0, vdelta_e0));
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp1, vdelta_e1));
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp2, vdelta_e2));
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp3, vdelta_e3));
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp4, vdelta_e4));
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp5, vdelta_e5));
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp6, vdelta_e6));
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp7, vdelta_e7));
vacce0 = vmax_e0;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
__m512 vaccv = vaccv0;
__m512 vacce = vacce0;
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m512 vmax_e = _mm512_max_ps(vacce, vn);
const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
vaccv = _mm512_scalef_ps(vaccv, vdelta_acce);
vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, vdelta_e));
vacce = vmax_e;
}
if XNN_UNLIKELY(batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, vdelta_acce);
vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, vdelta_e));
vacce = vmax_e;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
const float vmax_acce = _mm512_reduce_max_ps(vacce);
const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));
sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
sum[1] = vmax_acce;
_mm256_zeroupper();
}
| 10,843 | 42.376 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-x144-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddextexp.h>
// Computes the sum of exp(input[i]) over `batch` bytes of floats in "extended"
// floating-point representation: on return, sum[0] is the "mantissa" and
// sum[1] the "exponent", i.e. the result equals sum[0] * 2**sum[1]. This
// representation (maintained via _mm512_scalef_ps) avoids overflow/underflow
// of a plain floating-point accumulation.
//
//   batch - number of input bytes; must be non-zero and a multiple of sizeof(float)
//   input - pointer to the input floats
//   sum   - pointer to two output floats (mantissa, exponent)
//
// Variant: main loop processes 144 floats (9x16) per iteration using 3
// independent accumulator pairs (vaccvN = mantissa vector, vacceN = exponent
// vector), which are merged after the main loop.
void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x144_acc3(
    size_t batch,
    const float* input,
    float* sum)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);
  // Constants for exp(x) = 2**n * exp(t): log2(e), -log(2) split into
  // high/low parts (Cody-Waite), and degree-5 polynomial coefficients c0..c5.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
  const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);
  // Accumulators start as 0 * 2**(-inf), the "extended" representation of zero.
  __m512 vaccv0 = _mm512_setzero_ps();
  __m512 vaccv1 = _mm512_setzero_ps();
  __m512 vaccv2 = _mm512_setzero_ps();
  __m512 vacce0 = vminus_inf;
  __m512 vacce1 = vminus_inf;
  __m512 vacce2 = vminus_inf;
  for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
    // Load 144 (9x16) inputs at a time.
    const __m512 vx0 = _mm512_loadu_ps(input);
    const __m512 vx1 = _mm512_loadu_ps(input + 16);
    const __m512 vx2 = _mm512_loadu_ps(input + 32);
    const __m512 vx3 = _mm512_loadu_ps(input + 48);
    const __m512 vx4 = _mm512_loadu_ps(input + 64);
    const __m512 vx5 = _mm512_loadu_ps(input + 80);
    const __m512 vx6 = _mm512_loadu_ps(input + 96);
    const __m512 vx7 = _mm512_loadu_ps(input + 112);
    const __m512 vx8 = _mm512_loadu_ps(input + 128);
    input += 144;
    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
    //  - vnX is "exponent"
    //  - vpX is "mantissa"
    //
    //  exp2(ae) * av + exp2(be) * bv =
    //    = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
    //    = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
    //    = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
    //
    // For computational efficiency we add three "extended" floating-point numbers at a time.
    // Vectors are distributed round-robin over the 3 accumulators:
    // vn0/vn3/vn6 -> accumulator 0, vn1/vn4/vn7 -> accumulator 1, vn2/vn5/vn8 -> accumulator 2.
    __m512 vmax_e0 = _mm512_max_ps(vacce0, vn0);
    __m512 vmax_e1 = _mm512_max_ps(vacce1, vn1);
    __m512 vmax_e2 = _mm512_max_ps(vacce2, vn2);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn3);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn4);
    vmax_e2 = _mm512_max_ps(vmax_e2, vn5);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn6);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn7);
    vmax_e2 = _mm512_max_ps(vmax_e2, vn8);
    // Deltas are <= 0, so the scalef below only scales values down (no overflow).
    const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_e0);
    const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_e1);
    const __m512 vdelta_acce2 = _mm512_sub_ps(vacce2, vmax_e2);
    const __m512 vdelta_e0 = _mm512_sub_ps(vn0, vmax_e0);
    const __m512 vdelta_e1 = _mm512_sub_ps(vn1, vmax_e1);
    const __m512 vdelta_e2 = _mm512_sub_ps(vn2, vmax_e2);
    const __m512 vdelta_e3 = _mm512_sub_ps(vn3, vmax_e0);
    const __m512 vdelta_e4 = _mm512_sub_ps(vn4, vmax_e1);
    const __m512 vdelta_e5 = _mm512_sub_ps(vn5, vmax_e2);
    const __m512 vdelta_e6 = _mm512_sub_ps(vn6, vmax_e0);
    const __m512 vdelta_e7 = _mm512_sub_ps(vn7, vmax_e1);
    const __m512 vdelta_e8 = _mm512_sub_ps(vn8, vmax_e2);
    // Update accumulated "mantissa" and "exponent" values
    vaccv0 = _mm512_scalef_ps(vaccv0, vdelta_acce0);
    vaccv1 = _mm512_scalef_ps(vaccv1, vdelta_acce1);
    vaccv2 = _mm512_scalef_ps(vaccv2, vdelta_acce2);
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp0, vdelta_e0));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp1, vdelta_e1));
    vaccv2 = _mm512_add_ps(vaccv2, _mm512_scalef_ps(vp2, vdelta_e2));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp3, vdelta_e3));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp4, vdelta_e4));
    vaccv2 = _mm512_add_ps(vaccv2, _mm512_scalef_ps(vp5, vdelta_e5));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp6, vdelta_e6));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp7, vdelta_e7));
    vaccv2 = _mm512_add_ps(vaccv2, _mm512_scalef_ps(vp8, vdelta_e8));
    vacce0 = vmax_e0;
    vacce1 = vmax_e1;
    vacce2 = vmax_e2;
  }
  // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
  // Same max/delta/scalef scheme as above, applied across the 3 accumulators.
  const __m512 vmax_acce01 = _mm512_max_ps(vacce0, vacce1);
  const __m512 vmax_acce2 = vacce2;
  const __m512 vmax_acce012 = _mm512_max_ps(vmax_acce01, vmax_acce2);
  const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_acce012);
  const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_acce012);
  const __m512 vdelta_acce2 = _mm512_sub_ps(vacce2, vmax_acce012);
  __m512 vaccv = _mm512_scalef_ps(vaccv0, vdelta_acce0);
  vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv1, vdelta_acce1));
  vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv2, vdelta_acce2));
  __m512 vacce = vmax_acce012;
  // Secondary loop: handle the remaining full 16-float vectors one at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vx = _mm512_loadu_ps(input);
    input += 16;
    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m512 vmax_e = _mm512_max_ps(vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_scalef_ps(vaccv, vdelta_acce);
    vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, vdelta_e));
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    // Load up to 15 inputs at a time.
    // Masked-out lanes are zeroed by maskz_loadu and excluded from the
    // accumulation below via the masked max/scalef/add intrinsics.
    const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, vdelta_acce);
    vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, vdelta_e));
    vacce = vmax_e;
  }
  // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
  const float vmax_acce = _mm512_reduce_max_ps(vacce);
  const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));
  // Output: result = sum[0] * 2**sum[1].
  sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
  sum[1] = vmax_acce;
  _mm256_zeroupper();
}
| 12,455 | 43.170213 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-x144.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddextexp.h>
// Computes the sum of exp(input[i]) over `batch` bytes of floats in "extended"
// floating-point representation: on return, sum[0] is the "mantissa" and
// sum[1] the "exponent", i.e. the result equals sum[0] * 2**sum[1]. This
// representation (maintained via _mm512_scalef_ps) avoids overflow/underflow
// of a plain floating-point accumulation.
//
//   batch - number of input bytes; must be non-zero and a multiple of sizeof(float)
//   input - pointer to the input floats
//   sum   - pointer to two output floats (mantissa, exponent)
//
// Variant: main loop processes 144 floats (9x16) per iteration using a single
// accumulator pair (vaccv0 = mantissa vector, vacce0 = exponent vector).
void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x144(
    size_t batch,
    const float* input,
    float* sum)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);
  // Constants for exp(x) = 2**n * exp(t): log2(e), -log(2) split into
  // high/low parts (Cody-Waite), and degree-5 polynomial coefficients c0..c5.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
  const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);
  // Accumulator starts as 0 * 2**(-inf), the "extended" representation of zero.
  __m512 vaccv0 = _mm512_setzero_ps();
  __m512 vacce0 = vminus_inf;
  for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
    // Load 144 (9x16) inputs at a time.
    const __m512 vx0 = _mm512_loadu_ps(input);
    const __m512 vx1 = _mm512_loadu_ps(input + 16);
    const __m512 vx2 = _mm512_loadu_ps(input + 32);
    const __m512 vx3 = _mm512_loadu_ps(input + 48);
    const __m512 vx4 = _mm512_loadu_ps(input + 64);
    const __m512 vx5 = _mm512_loadu_ps(input + 80);
    const __m512 vx6 = _mm512_loadu_ps(input + 96);
    const __m512 vx7 = _mm512_loadu_ps(input + 112);
    const __m512 vx8 = _mm512_loadu_ps(input + 128);
    input += 144;
    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
    //  - vnX is "exponent"
    //  - vpX is "mantissa"
    //
    //  exp2(ae) * av + exp2(be) * bv =
    //    = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
    //    = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
    //    = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
    //
    // This variant uses a single accumulator, so all nine new "extended" values
    // are folded into it under one shared maximum exponent per iteration.
    __m512 vmax_e0 = _mm512_max_ps(vacce0, vn0);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn1);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn2);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn3);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn4);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn5);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn6);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn7);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn8);
    // Deltas are <= 0, so the scalef below only scales values down (no overflow).
    const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_e0);
    const __m512 vdelta_e0 = _mm512_sub_ps(vn0, vmax_e0);
    const __m512 vdelta_e1 = _mm512_sub_ps(vn1, vmax_e0);
    const __m512 vdelta_e2 = _mm512_sub_ps(vn2, vmax_e0);
    const __m512 vdelta_e3 = _mm512_sub_ps(vn3, vmax_e0);
    const __m512 vdelta_e4 = _mm512_sub_ps(vn4, vmax_e0);
    const __m512 vdelta_e5 = _mm512_sub_ps(vn5, vmax_e0);
    const __m512 vdelta_e6 = _mm512_sub_ps(vn6, vmax_e0);
    const __m512 vdelta_e7 = _mm512_sub_ps(vn7, vmax_e0);
    const __m512 vdelta_e8 = _mm512_sub_ps(vn8, vmax_e0);
    // Update accumulated "mantissa" and "exponent" values
    vaccv0 = _mm512_scalef_ps(vaccv0, vdelta_acce0);
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp0, vdelta_e0));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp1, vdelta_e1));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp2, vdelta_e2));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp3, vdelta_e3));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp4, vdelta_e4));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp5, vdelta_e5));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp6, vdelta_e6));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp7, vdelta_e7));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp8, vdelta_e8));
    vacce0 = vmax_e0;
  }
  // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
  // (Single-accumulator variant: this is just a rename, no merge needed.)
  __m512 vaccv = vaccv0;
  __m512 vacce = vacce0;
  // Secondary loop: handle the remaining full 16-float vectors one at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vx = _mm512_loadu_ps(input);
    input += 16;
    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m512 vmax_e = _mm512_max_ps(vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_scalef_ps(vaccv, vdelta_acce);
    vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, vdelta_e));
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    // Load up to 15 inputs at a time.
    // Masked-out lanes are zeroed by maskz_loadu and excluded from the
    // accumulation below via the masked max/scalef/add intrinsics.
    const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, vdelta_acce);
    vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, vdelta_e));
    vacce = vmax_e;
  }
  // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
  const float vmax_acce = _mm512_reduce_max_ps(vacce);
  const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));
  // Output: result = sum[0] * 2**sum[1].
  sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
  sum[1] = vmax_acce;
  _mm256_zeroupper();
}
| 11,471 | 42.78626 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-x160-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddextexp.h>
void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x160_acc2(
size_t batch,
const float* input,
float* sum)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);
__m512 vaccv0 = _mm512_setzero_ps();
__m512 vaccv1 = _mm512_setzero_ps();
__m512 vacce0 = vminus_inf;
__m512 vacce1 = vminus_inf;
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
// Load 160 (10x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
const __m512 vx9 = _mm512_loadu_ps(input + 144);
input += 160;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
__m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av + exp2(be) * bv =
// = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
// = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
// = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
//
// For computational efficiency we add three "extended" floating-point numbers at a time.
__m512 vmax_e0 = _mm512_max_ps(vacce0, vn0);
__m512 vmax_e1 = _mm512_max_ps(vacce1, vn1);
vmax_e0 = _mm512_max_ps(vmax_e0, vn2);
vmax_e1 = _mm512_max_ps(vmax_e1, vn3);
vmax_e0 = _mm512_max_ps(vmax_e0, vn4);
vmax_e1 = _mm512_max_ps(vmax_e1, vn5);
vmax_e0 = _mm512_max_ps(vmax_e0, vn6);
vmax_e1 = _mm512_max_ps(vmax_e1, vn7);
vmax_e0 = _mm512_max_ps(vmax_e0, vn8);
vmax_e1 = _mm512_max_ps(vmax_e1, vn9);
const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_e0);
const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_e1);
const __m512 vdelta_e0 = _mm512_sub_ps(vn0, vmax_e0);
const __m512 vdelta_e1 = _mm512_sub_ps(vn1, vmax_e1);
const __m512 vdelta_e2 = _mm512_sub_ps(vn2, vmax_e0);
const __m512 vdelta_e3 = _mm512_sub_ps(vn3, vmax_e1);
const __m512 vdelta_e4 = _mm512_sub_ps(vn4, vmax_e0);
const __m512 vdelta_e5 = _mm512_sub_ps(vn5, vmax_e1);
const __m512 vdelta_e6 = _mm512_sub_ps(vn6, vmax_e0);
const __m512 vdelta_e7 = _mm512_sub_ps(vn7, vmax_e1);
const __m512 vdelta_e8 = _mm512_sub_ps(vn8, vmax_e0);
const __m512 vdelta_e9 = _mm512_sub_ps(vn9, vmax_e1);
// Update accumulated "mantissa" and "exponent" values
vaccv0 = _mm512_scalef_ps(vaccv0, vdelta_acce0);
vaccv1 = _mm512_scalef_ps(vaccv1, vdelta_acce1);
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp0, vdelta_e0));
vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp1, vdelta_e1));
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp2, vdelta_e2));
vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp3, vdelta_e3));
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp4, vdelta_e4));
vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp5, vdelta_e5));
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp6, vdelta_e6));
vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp7, vdelta_e7));
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp8, vdelta_e8));
vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp9, vdelta_e9));
vacce0 = vmax_e0;
vacce1 = vmax_e1;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
const __m512 vmax_acce01 = _mm512_max_ps(vacce0, vacce1);
const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_acce01);
const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_acce01);
__m512 vaccv = _mm512_scalef_ps(vaccv0, vdelta_acce0);
vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv1, vdelta_acce1));
__m512 vacce = vmax_acce01;
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m512 vmax_e = _mm512_max_ps(vacce, vn);
const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
vaccv = _mm512_scalef_ps(vaccv, vdelta_acce);
vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, vdelta_e));
vacce = vmax_e;
}
if XNN_UNLIKELY(batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, vdelta_acce);
vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, vdelta_e));
vacce = vmax_e;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
const float vmax_acce = _mm512_reduce_max_ps(vacce);
const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));
sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
sum[1] = vmax_acce;
_mm256_zeroupper();
}
| 12,622 | 43.291228 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-x160-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddextexp.h>
// Computes sum(exp(input[i])) over the batch in an "extended" floating-point
// representation that cannot overflow or underflow:
//   sum[0] = "mantissa", sum[1] = base-2 "exponent", total = sum[0] * 2**sum[1].
// AVX512F kernel: the main loop consumes 160 floats (10x16) per iteration and
// keeps 5 parallel ("mantissa", "exponent") accumulator pairs.
//
// batch - number of input bytes; must be non-zero and a multiple of sizeof(float).
// input - pointer to the input elements.
// sum - pointer to 2 output floats: { mantissa, exponent } as described above.
void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x160_acc5(
    size_t batch,
    const float* input,
    float* sum)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // log2(e), and a two-constant (Cody-Waite) split of -log(2) used for
  // accurate range reduction below.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  // Coefficients of the degree-5 polynomial approximation of exp(t) on
  // [-log(2)/2, log(2)/2]; vc0 is the constant term.
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
  const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);

  // Five accumulator pairs: vaccvN is the "mantissa", vacceN the "exponent".
  // The (0, -INFINITY) pair represents a zero sum: any finite exponent from
  // the data dominates -INFINITY in the max-based merge below.
  __m512 vaccv0 = _mm512_setzero_ps();
  __m512 vaccv1 = _mm512_setzero_ps();
  __m512 vaccv2 = _mm512_setzero_ps();
  __m512 vaccv3 = _mm512_setzero_ps();
  __m512 vaccv4 = _mm512_setzero_ps();
  __m512 vacce0 = vminus_inf;
  __m512 vacce1 = vminus_inf;
  __m512 vacce2 = vminus_inf;
  __m512 vacce3 = vminus_inf;
  __m512 vacce4 = vminus_inf;
  for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
    // Load 160 (10x16) inputs at a time.
    const __m512 vx0 = _mm512_loadu_ps(input);
    const __m512 vx1 = _mm512_loadu_ps(input + 16);
    const __m512 vx2 = _mm512_loadu_ps(input + 32);
    const __m512 vx3 = _mm512_loadu_ps(input + 48);
    const __m512 vx4 = _mm512_loadu_ps(input + 64);
    const __m512 vx5 = _mm512_loadu_ps(input + 80);
    const __m512 vx6 = _mm512_loadu_ps(input + 96);
    const __m512 vx7 = _mm512_loadu_ps(input + 112);
    const __m512 vx8 = _mm512_loadu_ps(input + 128);
    const __m512 vx9 = _mm512_loadu_ps(input + 144);
    input += 160;

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // via Horner's scheme: p = ((((c5*t + c4)*t + c3)*t + c2)*t + c1)*t + c0.
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    __m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
    //  - vnX is "exponent"
    //  - vpX is "mantissa"
    //
    //  exp2(ae) * av + exp2(be) * bv =
    //    = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
    //    = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
    //    = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
    //
    // For computational efficiency we add three "extended" floating-point numbers at a time
    // (each accumulator pair absorbs two new (vn, vp) pairs per iteration).
    __m512 vmax_e0 = _mm512_max_ps(vacce0, vn0);
    __m512 vmax_e1 = _mm512_max_ps(vacce1, vn1);
    __m512 vmax_e2 = _mm512_max_ps(vacce2, vn2);
    __m512 vmax_e3 = _mm512_max_ps(vacce3, vn3);
    __m512 vmax_e4 = _mm512_max_ps(vacce4, vn4);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn5);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn6);
    vmax_e2 = _mm512_max_ps(vmax_e2, vn7);
    vmax_e3 = _mm512_max_ps(vmax_e3, vn8);
    vmax_e4 = _mm512_max_ps(vmax_e4, vn9);
    const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_e0);
    const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_e1);
    const __m512 vdelta_acce2 = _mm512_sub_ps(vacce2, vmax_e2);
    const __m512 vdelta_acce3 = _mm512_sub_ps(vacce3, vmax_e3);
    const __m512 vdelta_acce4 = _mm512_sub_ps(vacce4, vmax_e4);
    const __m512 vdelta_e0 = _mm512_sub_ps(vn0, vmax_e0);
    const __m512 vdelta_e1 = _mm512_sub_ps(vn1, vmax_e1);
    const __m512 vdelta_e2 = _mm512_sub_ps(vn2, vmax_e2);
    const __m512 vdelta_e3 = _mm512_sub_ps(vn3, vmax_e3);
    const __m512 vdelta_e4 = _mm512_sub_ps(vn4, vmax_e4);
    const __m512 vdelta_e5 = _mm512_sub_ps(vn5, vmax_e0);
    const __m512 vdelta_e6 = _mm512_sub_ps(vn6, vmax_e1);
    const __m512 vdelta_e7 = _mm512_sub_ps(vn7, vmax_e2);
    const __m512 vdelta_e8 = _mm512_sub_ps(vn8, vmax_e3);
    const __m512 vdelta_e9 = _mm512_sub_ps(vn9, vmax_e4);

    // Update accumulated "mantissa" and "exponent" values:
    // _mm512_scalef_ps(v, e) rescales each lane by 2**e, aligning all terms
    // to the shared maximum exponent before the additions.
    vaccv0 = _mm512_scalef_ps(vaccv0, vdelta_acce0);
    vaccv1 = _mm512_scalef_ps(vaccv1, vdelta_acce1);
    vaccv2 = _mm512_scalef_ps(vaccv2, vdelta_acce2);
    vaccv3 = _mm512_scalef_ps(vaccv3, vdelta_acce3);
    vaccv4 = _mm512_scalef_ps(vaccv4, vdelta_acce4);
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp0, vdelta_e0));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp1, vdelta_e1));
    vaccv2 = _mm512_add_ps(vaccv2, _mm512_scalef_ps(vp2, vdelta_e2));
    vaccv3 = _mm512_add_ps(vaccv3, _mm512_scalef_ps(vp3, vdelta_e3));
    vaccv4 = _mm512_add_ps(vaccv4, _mm512_scalef_ps(vp4, vdelta_e4));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp5, vdelta_e5));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp6, vdelta_e6));
    vaccv2 = _mm512_add_ps(vaccv2, _mm512_scalef_ps(vp7, vdelta_e7));
    vaccv3 = _mm512_add_ps(vaccv3, _mm512_scalef_ps(vp8, vdelta_e8));
    vaccv4 = _mm512_add_ps(vaccv4, _mm512_scalef_ps(vp9, vdelta_e9));

    vacce0 = vmax_e0;
    vacce1 = vmax_e1;
    vacce2 = vmax_e2;
    vacce3 = vmax_e3;
    vacce4 = vmax_e4;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums:
  // align all 5 accumulators to their common maximum exponent, then add.
  const __m512 vmax_acce01 = _mm512_max_ps(vacce0, vacce1);
  const __m512 vmax_acce23 = _mm512_max_ps(vacce2, vacce3);
  const __m512 vmax_acce4 = vacce4;
  const __m512 vmax_acce0123 = _mm512_max_ps(vmax_acce01, vmax_acce23);
  const __m512 vmax_acce01234 = _mm512_max_ps(vmax_acce0123, vmax_acce4);
  const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_acce01234);
  const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_acce01234);
  const __m512 vdelta_acce2 = _mm512_sub_ps(vacce2, vmax_acce01234);
  const __m512 vdelta_acce3 = _mm512_sub_ps(vacce3, vmax_acce01234);
  const __m512 vdelta_acce4 = _mm512_sub_ps(vacce4, vmax_acce01234);
  __m512 vaccv = _mm512_scalef_ps(vaccv0, vdelta_acce0);
  vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv1, vdelta_acce1));
  vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv2, vdelta_acce2));
  vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv3, vdelta_acce3));
  vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv4, vdelta_acce4));
  __m512 vacce = vmax_acce01234;

  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vx = _mm512_loadu_ps(input);
    input += 16;

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m512 vmax_e = _mm512_max_ps(vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_scalef_ps(vaccv, vdelta_acce);
    vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, vdelta_e));
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    // Load up to 15 inputs at a time; masked-off lanes read as zero.
    const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation;
    // masked variants leave the lanes past the end of the batch untouched.
    const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, vdelta_acce);
    vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, vdelta_e));
    vacce = vmax_e;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
  const float vmax_acce = _mm512_reduce_max_ps(vacce);
  const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));

  sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
  sum[1] = vmax_acce;

  _mm256_zeroupper();
}
| 13,938 | 43.964516 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-x160.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddextexp.h>
// Computes sum(exp(input[i])) over the batch in an "extended" floating-point
// representation that cannot overflow or underflow:
//   sum[0] = "mantissa", sum[1] = base-2 "exponent", total = sum[0] * 2**sum[1].
// AVX512F kernel: the main loop consumes 160 floats (10x16) per iteration and
// keeps a single ("mantissa", "exponent") accumulator pair.
//
// batch - number of input bytes; must be non-zero and a multiple of sizeof(float).
// input - pointer to the input elements.
// sum - pointer to 2 output floats: { mantissa, exponent } as described above.
void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x160(
    size_t batch,
    const float* input,
    float* sum)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // log2(e), and a two-constant (Cody-Waite) split of -log(2) used for
  // accurate range reduction below.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
  // Coefficients of the degree-5 polynomial approximation of exp(t) on
  // [-log(2)/2, log(2)/2]; vc0 is the constant term.
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
  const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);

  // One accumulator pair: vaccv0 is the "mantissa", vacce0 the "exponent".
  // The (0, -INFINITY) pair represents a zero sum: any finite exponent from
  // the data dominates -INFINITY in the max-based merge below.
  __m512 vaccv0 = _mm512_setzero_ps();
  __m512 vacce0 = vminus_inf;
  for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
    // Load 160 (10x16) inputs at a time.
    const __m512 vx0 = _mm512_loadu_ps(input);
    const __m512 vx1 = _mm512_loadu_ps(input + 16);
    const __m512 vx2 = _mm512_loadu_ps(input + 32);
    const __m512 vx3 = _mm512_loadu_ps(input + 48);
    const __m512 vx4 = _mm512_loadu_ps(input + 64);
    const __m512 vx5 = _mm512_loadu_ps(input + 80);
    const __m512 vx6 = _mm512_loadu_ps(input + 96);
    const __m512 vx7 = _mm512_loadu_ps(input + 112);
    const __m512 vx8 = _mm512_loadu_ps(input + 128);
    const __m512 vx9 = _mm512_loadu_ps(input + 144);
    input += 160;

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // via Horner's scheme: p = ((((c5*t + c4)*t + c3)*t + c2)*t + c1)*t + c0.
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    __m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
    //  - vnX is "exponent"
    //  - vpX is "mantissa"
    //
    //  exp2(ae) * av + exp2(be) * bv =
    //    = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
    //    = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
    //    = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
    //
    // For computational efficiency we fold all ten new (vn, vp) pairs into the
    // single accumulator pair per iteration, sharing one maximum exponent.
    __m512 vmax_e0 = _mm512_max_ps(vacce0, vn0);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn1);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn2);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn3);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn4);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn5);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn6);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn7);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn8);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn9);
    const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_e0);
    const __m512 vdelta_e0 = _mm512_sub_ps(vn0, vmax_e0);
    const __m512 vdelta_e1 = _mm512_sub_ps(vn1, vmax_e0);
    const __m512 vdelta_e2 = _mm512_sub_ps(vn2, vmax_e0);
    const __m512 vdelta_e3 = _mm512_sub_ps(vn3, vmax_e0);
    const __m512 vdelta_e4 = _mm512_sub_ps(vn4, vmax_e0);
    const __m512 vdelta_e5 = _mm512_sub_ps(vn5, vmax_e0);
    const __m512 vdelta_e6 = _mm512_sub_ps(vn6, vmax_e0);
    const __m512 vdelta_e7 = _mm512_sub_ps(vn7, vmax_e0);
    const __m512 vdelta_e8 = _mm512_sub_ps(vn8, vmax_e0);
    const __m512 vdelta_e9 = _mm512_sub_ps(vn9, vmax_e0);

    // Update accumulated "mantissa" and "exponent" values:
    // _mm512_scalef_ps(v, e) rescales each lane by 2**e, aligning all terms
    // to the shared maximum exponent before the additions.
    vaccv0 = _mm512_scalef_ps(vaccv0, vdelta_acce0);
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp0, vdelta_e0));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp1, vdelta_e1));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp2, vdelta_e2));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp3, vdelta_e3));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp4, vdelta_e4));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp5, vdelta_e5));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp6, vdelta_e6));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp7, vdelta_e7));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp8, vdelta_e8));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp9, vdelta_e9));

    vacce0 = vmax_e0;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
  // (Only one accumulator pair in this variant, so this is a plain copy.)
  __m512 vaccv = vaccv0;
  __m512 vacce = vacce0;

  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vx = _mm512_loadu_ps(input);
    input += 16;

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m512 vmax_e = _mm512_max_ps(vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_scalef_ps(vaccv, vdelta_acce);
    vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, vdelta_e));
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    // Load up to 15 inputs at a time; masked-off lanes read as zero.
    const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation;
    // masked variants leave the lanes past the end of the batch untouched.
    const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, vdelta_acce);
    vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, vdelta_e));
    vacce = vmax_e;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
  const float vmax_acce = _mm512_reduce_max_ps(vacce);
  const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));

  sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
  sum[1] = vmax_acce;

  _mm256_zeroupper();
}
| 12,100 | 43.164234 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-x192-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddextexp.h>
/*
 * Accumulates exp(input[i]) over a batch of floats in an "extended"
 * floating-point representation to avoid overflow/underflow:
 *   sum[0] = mantissa, sum[1] = binary exponent, true result = sum[0] * 2**sum[1].
 *
 * batch  - number of input bytes; must be non-zero and a multiple of sizeof(float).
 * input  - pointer to the input floats.
 * sum    - output array of 2 floats (mantissa, exponent) as described above.
 *
 * Main loop consumes 192 floats (12x16) per iteration using 2 partial
 * accumulator pairs (vaccv0/vacce0, vaccv1/vacce1); a second loop handles
 * 16 floats at a time, and a masked tail handles up to 15 leftover elements.
 */
void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x192_acc2(
    size_t batch,
    const float* input,
    float* sum)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // log2(e), and log(2) split into high/low parts for Cody-Waite reduction.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);

  // Degree-5 minimax polynomial coefficients for exp(t), t in [-log(2)/2, log(2)/2].
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);

  const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);

  // Two ("mantissa", "exponent") accumulator pairs; exponent starts at -inf
  // so the first max() adopts the first batch's exponents.
  __m512 vaccv0 = _mm512_setzero_ps();
  __m512 vaccv1 = _mm512_setzero_ps();
  __m512 vacce0 = vminus_inf;
  __m512 vacce1 = vminus_inf;
  for (; batch >= 192 * sizeof(float); batch -= 192 * sizeof(float)) {
    // Load 192 (12x16) inputs at a time.
    const __m512 vx0 = _mm512_loadu_ps(input);
    const __m512 vx1 = _mm512_loadu_ps(input + 16);
    const __m512 vx2 = _mm512_loadu_ps(input + 32);
    const __m512 vx3 = _mm512_loadu_ps(input + 48);
    const __m512 vx4 = _mm512_loadu_ps(input + 64);
    const __m512 vx5 = _mm512_loadu_ps(input + 80);
    const __m512 vx6 = _mm512_loadu_ps(input + 96);
    const __m512 vx7 = _mm512_loadu_ps(input + 112);
    const __m512 vx8 = _mm512_loadu_ps(input + 128);
    const __m512 vx9 = _mm512_loadu_ps(input + 144);
    const __m512 vx10 = _mm512_loadu_ps(input + 160);
    const __m512 vx11 = _mm512_loadu_ps(input + 176);
    input += 192;

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
    const __m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
    const __m512 vn11 = _mm512_roundscale_ps(_mm512_mul_ps(vx11, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
    __m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_hi, vx10);
    __m512 vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_hi, vx11);

    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
    vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_lo, vt10);
    vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_lo, vt11);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    // Horner's scheme: p = ((((c5*t + c4)*t + c3)*t + c2)*t + c1)*t + c0.
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    __m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
    __m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
    __m512 vp11 = _mm512_fmadd_ps(vc5, vt11, vc4);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc3);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc2);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc1);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
    // - vnX is "exponent"
    // - vpX is "mantissa"
    //
    // exp2(ae) * av + exp2(be) * bv =
    //   = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
    //   = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
    //   = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
    //
    // For computational efficiency we add three "extended" floating-point numbers at a time.
    // Even-numbered vnX feed accumulator 0, odd-numbered vnX feed accumulator 1.
    __m512 vmax_e0 = _mm512_max_ps(vacce0, vn0);
    __m512 vmax_e1 = _mm512_max_ps(vacce1, vn1);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn2);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn3);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn4);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn5);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn6);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn7);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn8);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn9);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn10);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn11);

    const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_e0);
    const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_e1);
    const __m512 vdelta_e0 = _mm512_sub_ps(vn0, vmax_e0);
    const __m512 vdelta_e1 = _mm512_sub_ps(vn1, vmax_e1);
    const __m512 vdelta_e2 = _mm512_sub_ps(vn2, vmax_e0);
    const __m512 vdelta_e3 = _mm512_sub_ps(vn3, vmax_e1);
    const __m512 vdelta_e4 = _mm512_sub_ps(vn4, vmax_e0);
    const __m512 vdelta_e5 = _mm512_sub_ps(vn5, vmax_e1);
    const __m512 vdelta_e6 = _mm512_sub_ps(vn6, vmax_e0);
    const __m512 vdelta_e7 = _mm512_sub_ps(vn7, vmax_e1);
    const __m512 vdelta_e8 = _mm512_sub_ps(vn8, vmax_e0);
    const __m512 vdelta_e9 = _mm512_sub_ps(vn9, vmax_e1);
    const __m512 vdelta_e10 = _mm512_sub_ps(vn10, vmax_e0);
    const __m512 vdelta_e11 = _mm512_sub_ps(vn11, vmax_e1);

    // Update accumulated "mantissa" and "exponent" values
    // (scalef(x, d) computes x * 2**d, rescaling mantissas to the common exponent).
    vaccv0 = _mm512_scalef_ps(vaccv0, vdelta_acce0);
    vaccv1 = _mm512_scalef_ps(vaccv1, vdelta_acce1);
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp0, vdelta_e0));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp1, vdelta_e1));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp2, vdelta_e2));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp3, vdelta_e3));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp4, vdelta_e4));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp5, vdelta_e5));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp6, vdelta_e6));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp7, vdelta_e7));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp8, vdelta_e8));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp9, vdelta_e9));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp10, vdelta_e10));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp11, vdelta_e11));

    vacce0 = vmax_e0;
    vacce1 = vmax_e1;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
  const __m512 vmax_acce01 = _mm512_max_ps(vacce0, vacce1);
  const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_acce01);
  const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_acce01);

  __m512 vaccv = _mm512_scalef_ps(vaccv0, vdelta_acce0);
  vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv1, vdelta_acce1));
  __m512 vacce = vmax_acce01;

  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vx = _mm512_loadu_ps(input);
    input += 16;

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m512 vmax_e = _mm512_max_ps(vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_scalef_ps(vaccv, vdelta_acce);
    vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, vdelta_e));
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    // Load up to 15 inputs at a time.
    const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    // Masked intrinsics leave the inactive (out-of-range) lanes of the accumulators unchanged.
    const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, vdelta_acce);
    vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, vdelta_e));
    vacce = vmax_e;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
  const float vmax_acce = _mm512_reduce_max_ps(vacce);
  const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));

  sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
  sum[1] = vmax_acce;

  _mm256_zeroupper();
}
| 13,934 | 44.097087 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-x192-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddextexp.h>
/*
 * Accumulates exp(input[i]) over a batch of floats in an "extended"
 * floating-point representation to avoid overflow/underflow:
 *   sum[0] = mantissa, sum[1] = binary exponent, true result = sum[0] * 2**sum[1].
 *
 * batch  - number of input bytes; must be non-zero and a multiple of sizeof(float).
 * input  - pointer to the input floats.
 * sum    - output array of 2 floats (mantissa, exponent) as described above.
 *
 * Main loop consumes 192 floats (12x16) per iteration using 3 partial
 * accumulator pairs (vaccv0..2 / vacce0..2); a second loop handles 16 floats
 * at a time, and a masked tail handles up to 15 leftover elements.
 */
void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x192_acc3(
    size_t batch,
    const float* input,
    float* sum)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // log2(e), and log(2) split into high/low parts for Cody-Waite reduction.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);

  // Degree-5 minimax polynomial coefficients for exp(t), t in [-log(2)/2, log(2)/2].
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);

  const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);

  // Three ("mantissa", "exponent") accumulator pairs; exponent starts at -inf
  // so the first max() adopts the first batch's exponents.
  __m512 vaccv0 = _mm512_setzero_ps();
  __m512 vaccv1 = _mm512_setzero_ps();
  __m512 vaccv2 = _mm512_setzero_ps();
  __m512 vacce0 = vminus_inf;
  __m512 vacce1 = vminus_inf;
  __m512 vacce2 = vminus_inf;
  for (; batch >= 192 * sizeof(float); batch -= 192 * sizeof(float)) {
    // Load 192 (12x16) inputs at a time.
    const __m512 vx0 = _mm512_loadu_ps(input);
    const __m512 vx1 = _mm512_loadu_ps(input + 16);
    const __m512 vx2 = _mm512_loadu_ps(input + 32);
    const __m512 vx3 = _mm512_loadu_ps(input + 48);
    const __m512 vx4 = _mm512_loadu_ps(input + 64);
    const __m512 vx5 = _mm512_loadu_ps(input + 80);
    const __m512 vx6 = _mm512_loadu_ps(input + 96);
    const __m512 vx7 = _mm512_loadu_ps(input + 112);
    const __m512 vx8 = _mm512_loadu_ps(input + 128);
    const __m512 vx9 = _mm512_loadu_ps(input + 144);
    const __m512 vx10 = _mm512_loadu_ps(input + 160);
    const __m512 vx11 = _mm512_loadu_ps(input + 176);
    input += 192;

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
    const __m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
    const __m512 vn11 = _mm512_roundscale_ps(_mm512_mul_ps(vx11, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
    __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
    __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
    __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
    __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
    __m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_hi, vx10);
    __m512 vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_hi, vx11);

    vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
    vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
    vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
    vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
    vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
    vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
    vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
    vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
    vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
    vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_lo, vt10);
    vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_lo, vt11);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    // Horner's scheme: p = ((((c5*t + c4)*t + c3)*t + c2)*t + c1)*t + c0.
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    __m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
    __m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
    __m512 vp11 = _mm512_fmadd_ps(vc5, vt11, vc4);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc3);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc2);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc1);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
    // - vnX is "exponent"
    // - vpX is "mantissa"
    //
    // exp2(ae) * av + exp2(be) * bv =
    //   = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
    //   = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
    //   = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
    //
    // For computational efficiency we add three "extended" floating-point numbers at a time.
    // vnX feeds accumulator (X mod 3).
    __m512 vmax_e0 = _mm512_max_ps(vacce0, vn0);
    __m512 vmax_e1 = _mm512_max_ps(vacce1, vn1);
    __m512 vmax_e2 = _mm512_max_ps(vacce2, vn2);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn3);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn4);
    vmax_e2 = _mm512_max_ps(vmax_e2, vn5);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn6);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn7);
    vmax_e2 = _mm512_max_ps(vmax_e2, vn8);
    vmax_e0 = _mm512_max_ps(vmax_e0, vn9);
    vmax_e1 = _mm512_max_ps(vmax_e1, vn10);
    vmax_e2 = _mm512_max_ps(vmax_e2, vn11);

    const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_e0);
    const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_e1);
    const __m512 vdelta_acce2 = _mm512_sub_ps(vacce2, vmax_e2);
    const __m512 vdelta_e0 = _mm512_sub_ps(vn0, vmax_e0);
    const __m512 vdelta_e1 = _mm512_sub_ps(vn1, vmax_e1);
    const __m512 vdelta_e2 = _mm512_sub_ps(vn2, vmax_e2);
    const __m512 vdelta_e3 = _mm512_sub_ps(vn3, vmax_e0);
    const __m512 vdelta_e4 = _mm512_sub_ps(vn4, vmax_e1);
    const __m512 vdelta_e5 = _mm512_sub_ps(vn5, vmax_e2);
    const __m512 vdelta_e6 = _mm512_sub_ps(vn6, vmax_e0);
    const __m512 vdelta_e7 = _mm512_sub_ps(vn7, vmax_e1);
    const __m512 vdelta_e8 = _mm512_sub_ps(vn8, vmax_e2);
    const __m512 vdelta_e9 = _mm512_sub_ps(vn9, vmax_e0);
    const __m512 vdelta_e10 = _mm512_sub_ps(vn10, vmax_e1);
    const __m512 vdelta_e11 = _mm512_sub_ps(vn11, vmax_e2);

    // Update accumulated "mantissa" and "exponent" values
    // (scalef(x, d) computes x * 2**d, rescaling mantissas to the common exponent).
    vaccv0 = _mm512_scalef_ps(vaccv0, vdelta_acce0);
    vaccv1 = _mm512_scalef_ps(vaccv1, vdelta_acce1);
    vaccv2 = _mm512_scalef_ps(vaccv2, vdelta_acce2);
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp0, vdelta_e0));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp1, vdelta_e1));
    vaccv2 = _mm512_add_ps(vaccv2, _mm512_scalef_ps(vp2, vdelta_e2));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp3, vdelta_e3));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp4, vdelta_e4));
    vaccv2 = _mm512_add_ps(vaccv2, _mm512_scalef_ps(vp5, vdelta_e5));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp6, vdelta_e6));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp7, vdelta_e7));
    vaccv2 = _mm512_add_ps(vaccv2, _mm512_scalef_ps(vp8, vdelta_e8));
    vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp9, vdelta_e9));
    vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp10, vdelta_e10));
    vaccv2 = _mm512_add_ps(vaccv2, _mm512_scalef_ps(vp11, vdelta_e11));

    vacce0 = vmax_e0;
    vacce1 = vmax_e1;
    vacce2 = vmax_e2;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
  const __m512 vmax_acce01 = _mm512_max_ps(vacce0, vacce1);
  const __m512 vmax_acce2 = vacce2;
  const __m512 vmax_acce012 = _mm512_max_ps(vmax_acce01, vmax_acce2);
  const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_acce012);
  const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_acce012);
  const __m512 vdelta_acce2 = _mm512_sub_ps(vacce2, vmax_acce012);

  __m512 vaccv = _mm512_scalef_ps(vaccv0, vdelta_acce0);
  vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv1, vdelta_acce1));
  vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv2, vdelta_acce2));
  __m512 vacce = vmax_acce012;

  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 inputs at a time.
    const __m512 vx = _mm512_loadu_ps(input);
    input += 16;

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    const __m512 vmax_e = _mm512_max_ps(vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_scalef_ps(vaccv, vdelta_acce);
    vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, vdelta_e));
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    // Load up to 15 inputs at a time.
    const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);

    // Compute reduced argument batch := round(input / log(2)).
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    // Compute reduced argument t := input - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
    // Masked intrinsics leave the inactive (out-of-range) lanes of the accumulators unchanged.
    const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
    const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
    const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
    vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, vdelta_acce);
    vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, vdelta_e));
    vacce = vmax_e;
  }

  // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
  const float vmax_acce = _mm512_reduce_max_ps(vacce);
  const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));

  sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
  sum[1] = vmax_acce;

  _mm256_zeroupper();
}
| 14,396 | 44.273585 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-x192-acc6.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddextexp.h>
void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x192_acc6(
size_t batch,
const float* input,
float* sum)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(sum != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);
__m512 vaccv0 = _mm512_setzero_ps();
__m512 vaccv1 = _mm512_setzero_ps();
__m512 vaccv2 = _mm512_setzero_ps();
__m512 vaccv3 = _mm512_setzero_ps();
__m512 vaccv4 = _mm512_setzero_ps();
__m512 vaccv5 = _mm512_setzero_ps();
__m512 vacce0 = vminus_inf;
__m512 vacce1 = vminus_inf;
__m512 vacce2 = vminus_inf;
__m512 vacce3 = vminus_inf;
__m512 vacce4 = vminus_inf;
__m512 vacce5 = vminus_inf;
for (; batch >= 192 * sizeof(float); batch -= 192 * sizeof(float)) {
// Load 192 (12x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
const __m512 vx9 = _mm512_loadu_ps(input + 144);
const __m512 vx10 = _mm512_loadu_ps(input + 160);
const __m512 vx11 = _mm512_loadu_ps(input + 176);
input += 192;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
const __m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
const __m512 vn11 = _mm512_roundscale_ps(_mm512_mul_ps(vx11, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_hi, vx10);
__m512 vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_hi, vx11);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_lo, vt10);
vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_lo, vt11);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
__m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
__m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
__m512 vp11 = _mm512_fmadd_ps(vc5, vt11, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av + exp2(be) * bv =
// = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
// = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
// = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
//
// For computational efficiency we add three "extended" floating-point numbers at a time.
__m512 vmax_e0 = _mm512_max_ps(vacce0, vn0);
__m512 vmax_e1 = _mm512_max_ps(vacce1, vn1);
__m512 vmax_e2 = _mm512_max_ps(vacce2, vn2);
__m512 vmax_e3 = _mm512_max_ps(vacce3, vn3);
__m512 vmax_e4 = _mm512_max_ps(vacce4, vn4);
__m512 vmax_e5 = _mm512_max_ps(vacce5, vn5);
vmax_e0 = _mm512_max_ps(vmax_e0, vn6);
vmax_e1 = _mm512_max_ps(vmax_e1, vn7);
vmax_e2 = _mm512_max_ps(vmax_e2, vn8);
vmax_e3 = _mm512_max_ps(vmax_e3, vn9);
vmax_e4 = _mm512_max_ps(vmax_e4, vn10);
vmax_e5 = _mm512_max_ps(vmax_e5, vn11);
const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_e0);
const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_e1);
const __m512 vdelta_acce2 = _mm512_sub_ps(vacce2, vmax_e2);
const __m512 vdelta_acce3 = _mm512_sub_ps(vacce3, vmax_e3);
const __m512 vdelta_acce4 = _mm512_sub_ps(vacce4, vmax_e4);
const __m512 vdelta_acce5 = _mm512_sub_ps(vacce5, vmax_e5);
const __m512 vdelta_e0 = _mm512_sub_ps(vn0, vmax_e0);
const __m512 vdelta_e1 = _mm512_sub_ps(vn1, vmax_e1);
const __m512 vdelta_e2 = _mm512_sub_ps(vn2, vmax_e2);
const __m512 vdelta_e3 = _mm512_sub_ps(vn3, vmax_e3);
const __m512 vdelta_e4 = _mm512_sub_ps(vn4, vmax_e4);
const __m512 vdelta_e5 = _mm512_sub_ps(vn5, vmax_e5);
const __m512 vdelta_e6 = _mm512_sub_ps(vn6, vmax_e0);
const __m512 vdelta_e7 = _mm512_sub_ps(vn7, vmax_e1);
const __m512 vdelta_e8 = _mm512_sub_ps(vn8, vmax_e2);
const __m512 vdelta_e9 = _mm512_sub_ps(vn9, vmax_e3);
const __m512 vdelta_e10 = _mm512_sub_ps(vn10, vmax_e4);
const __m512 vdelta_e11 = _mm512_sub_ps(vn11, vmax_e5);
// Update accumulated "mantissa" and "exponent" values
vaccv0 = _mm512_scalef_ps(vaccv0, vdelta_acce0);
vaccv1 = _mm512_scalef_ps(vaccv1, vdelta_acce1);
vaccv2 = _mm512_scalef_ps(vaccv2, vdelta_acce2);
vaccv3 = _mm512_scalef_ps(vaccv3, vdelta_acce3);
vaccv4 = _mm512_scalef_ps(vaccv4, vdelta_acce4);
vaccv5 = _mm512_scalef_ps(vaccv5, vdelta_acce5);
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp0, vdelta_e0));
vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp1, vdelta_e1));
vaccv2 = _mm512_add_ps(vaccv2, _mm512_scalef_ps(vp2, vdelta_e2));
vaccv3 = _mm512_add_ps(vaccv3, _mm512_scalef_ps(vp3, vdelta_e3));
vaccv4 = _mm512_add_ps(vaccv4, _mm512_scalef_ps(vp4, vdelta_e4));
vaccv5 = _mm512_add_ps(vaccv5, _mm512_scalef_ps(vp5, vdelta_e5));
vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp6, vdelta_e6));
vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp7, vdelta_e7));
vaccv2 = _mm512_add_ps(vaccv2, _mm512_scalef_ps(vp8, vdelta_e8));
vaccv3 = _mm512_add_ps(vaccv3, _mm512_scalef_ps(vp9, vdelta_e9));
vaccv4 = _mm512_add_ps(vaccv4, _mm512_scalef_ps(vp10, vdelta_e10));
vaccv5 = _mm512_add_ps(vaccv5, _mm512_scalef_ps(vp11, vdelta_e11));
vacce0 = vmax_e0;
vacce1 = vmax_e1;
vacce2 = vmax_e2;
vacce3 = vmax_e3;
vacce4 = vmax_e4;
vacce5 = vmax_e5;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
const __m512 vmax_acce01 = _mm512_max_ps(vacce0, vacce1);
const __m512 vmax_acce23 = _mm512_max_ps(vacce2, vacce3);
const __m512 vmax_acce45 = _mm512_max_ps(vacce4, vacce5);
const __m512 vmax_acce0123 = _mm512_max_ps(vmax_acce01, vmax_acce23);
const __m512 vmax_acce012345 = _mm512_max_ps(vmax_acce0123, vmax_acce45);
const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_acce012345);
const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_acce012345);
const __m512 vdelta_acce2 = _mm512_sub_ps(vacce2, vmax_acce012345);
const __m512 vdelta_acce3 = _mm512_sub_ps(vacce3, vmax_acce012345);
const __m512 vdelta_acce4 = _mm512_sub_ps(vacce4, vmax_acce012345);
const __m512 vdelta_acce5 = _mm512_sub_ps(vacce5, vmax_acce012345);
__m512 vaccv = _mm512_scalef_ps(vaccv0, vdelta_acce0);
vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv1, vdelta_acce1));
vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv2, vdelta_acce2));
vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv3, vdelta_acce3));
vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv4, vdelta_acce4));
vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv5, vdelta_acce5));
__m512 vacce = vmax_acce012345;
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m512 vmax_e = _mm512_max_ps(vacce, vn);
const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
vaccv = _mm512_scalef_ps(vaccv, vdelta_acce);
vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, vdelta_e));
vacce = vmax_e;
}
if XNN_UNLIKELY(batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, vdelta_acce);
vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, vdelta_e));
vacce = vmax_e;
}
// Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
const float vmax_acce = _mm512_reduce_max_ps(vacce);
const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));
sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
sum[1] = vmax_acce;
_mm256_zeroupper();
}
| 15,638 | 44.86217 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-x192.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddextexp.h>
// Sums exp(input[i]) over the whole batch without overflow/underflow by keeping the
// running total in an "extended" floating-point form: the result equals
// sum[0] * 2**sum[1], where sum[0] is the "mantissa" part and sum[1] the "exponent" part.
void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x192(
    size_t batch,
    const float* input,
    float* sum)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(sum != NULL);

  // log2(e) plus a two-constant (Cody-Waite) split of -ln(2) for accurate range reduction.
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);

  // Degree-5 polynomial coefficients approximating exp(t) on [-log(2)/2, log(2)/2].
  const __m512 vc0 = _mm512_set1_ps(1.0f);
  const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
  const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
  const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
  const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);

  const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);

  // Per-lane extended accumulator: value == vaccv * 2**vacce.
  // Initialized to 0 * 2**(-inf), i.e. an exact zero.
  __m512 vaccv = _mm512_setzero_ps();
  __m512 vacce = vminus_inf;

  // Main loop: 192 elements (12 vectors of 16 floats) per iteration.
  for (; batch >= 192 * sizeof(float); batch -= 192 * sizeof(float)) {
    __m512 vx[12];
    __m512 vn[12];
    __m512 vt[12];
    __m512 vp[12];

    for (size_t i = 0; i < 12; i++) {
      vx[i] = _mm512_loadu_ps(input + 16 * i);
    }
    input += 192;

    // n := round(x / log(2)), so that x == n * log(2) + t with |t| <= log(2)/2.
    for (size_t i = 0; i < 12; i++) {
      vn[i] = _mm512_roundscale_ps(_mm512_mul_ps(vx[i], vlog2e), 0);
    }

    // t := x - n * log(2), using the hi/lo split of ln(2) to reduce rounding error.
    for (size_t i = 0; i < 12; i++) {
      vt[i] = _mm512_fmadd_ps(vn[i], vminus_ln2_hi, vx[i]);
    }
    for (size_t i = 0; i < 12; i++) {
      vt[i] = _mm512_fmadd_ps(vn[i], vminus_ln2_lo, vt[i]);
    }

    // p := exp(t) via Horner evaluation of the degree-5 polynomial.
    for (size_t i = 0; i < 12; i++) {
      vp[i] = _mm512_fmadd_ps(vc5, vt[i], vc4);
      vp[i] = _mm512_fmadd_ps(vp[i], vt[i], vc3);
      vp[i] = _mm512_fmadd_ps(vp[i], vt[i], vc2);
      vp[i] = _mm512_fmadd_ps(vp[i], vt[i], vc1);
      vp[i] = _mm512_fmadd_ps(vp[i], vt[i], vc0);
    }

    // Fold the twelve new (mantissa=p, exponent=n) terms into the accumulator using
    //   exp2(ae)*av + exp2(be)*bv = exp2(max_e) * (exp2(ae-max_e)*av + exp2(be-max_e)*bv)
    __m512 vmax_e = _mm512_max_ps(vacce, vn[0]);
    for (size_t i = 1; i < 12; i++) {
      vmax_e = _mm512_max_ps(vmax_e, vn[i]);
    }
    // Rescale the existing accumulator to the new common exponent...
    vaccv = _mm512_scalef_ps(vaccv, _mm512_sub_ps(vacce, vmax_e));
    // ...then add each new term, rescaled likewise.
    for (size_t i = 0; i < 12; i++) {
      vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp[i], _mm512_sub_ps(vn[i], vmax_e)));
    }
    vacce = vmax_e;
  }

  // Secondary loop: one vector of 16 floats at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(input);
    input += 16;

    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    const __m512 vmax_e = _mm512_max_ps(vacce, vn);
    vaccv = _mm512_scalef_ps(vaccv, _mm512_sub_ps(vacce, vmax_e));
    vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, _mm512_sub_ps(vn, vmax_e)));
    vacce = vmax_e;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Final 1..15 elements: a lane mask keeps inactive lanes from touching the accumulator.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);

    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    // Masked variants of the same extended accumulation: inactive lanes pass through.
    const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
    vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, _mm512_sub_ps(vacce, vmax_e));
    vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, _mm512_sub_ps(vn, vmax_e)));
    vacce = vmax_e;
  }

  // Horizontally reduce the 16 per-lane extended sums into one extended float.
  const float vmax_acce = _mm512_reduce_max_ps(vacce);
  const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));
  sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
  sum[1] = vmax_acce;
  _mm256_zeroupper();
}
| 13,412 | 44.010067 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-x64-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for the whole batch and stores the total of
// all outputs to *sum. NOTE(review): *max is presumably the precomputed maximum of the
// input (as in a softmax numerator pass), which keeps every argument to exp() <= 0.
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x64_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const __m256 vi_max = _mm256_broadcast_ss(max);
  // exp() constants from params: log2(e), the round-to-int magic bias, -ln(2),
  // degree-5 polynomial coefficients, and the flush-to-zero cutoff.
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);

  // Two partial sums ("acc2") shorten the floating-point add dependency chain.
  __m256 vacc0 = _mm256_setzero_ps();
  __m256 vacc1 = _mm256_setzero_ps();
  // Main loop: 64 elements (8 vectors of 8 floats) per iteration.
  for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
    __m256 vx[8];
    __m256 vn[8];
    __m256 vs[8];
    __m256 vt[8];
    __m256 vp[8];
    __m256 vf[8];

    // x := input - max.
    for (size_t i = 0; i < 8; i++) {
      vx[i] = _mm256_sub_ps(_mm256_loadu_ps(input + 8 * i), vi_max);
    }
    input += 64;

    // n := round(x / log(2)) via the magic-bias trick; the rounded integer occupies
    // the low mantissa bits of vn while the bias is still added in.
    for (size_t i = 0; i < 8; i++) {
      vn[i] = _mm256_fmadd_ps(vx[i], vlog2e, vmagic_bias);
    }
    // s := 2**n, constructed by shifting n's low bits into the exponent field.
    for (size_t i = 0; i < 8; i++) {
      vs[i] = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn[i]), 23));
    }
    for (size_t i = 0; i < 8; i++) {
      vn[i] = _mm256_sub_ps(vn[i], vmagic_bias);
    }

    // t := x - n * log(2)  (single-constant range reduction, "rr1").
    for (size_t i = 0; i < 8; i++) {
      vt[i] = _mm256_fmadd_ps(vn[i], vminus_ln2, vx[i]);
    }

    // p := Horner evaluation through c1; the constant term is folded into the
    // reconstruction f = s + (t*s)*p == s * (1 + t*p) below.
    for (size_t i = 0; i < 8; i++) {
      vp[i] = _mm256_fmadd_ps(vc5, vt[i], vc4);
      vp[i] = _mm256_fmadd_ps(vp[i], vt[i], vc3);
      vp[i] = _mm256_fmadd_ps(vp[i], vt[i], vc2);
      vp[i] = _mm256_fmadd_ps(vp[i], vt[i], vc1);
    }
    for (size_t i = 0; i < 8; i++) {
      vt[i] = _mm256_mul_ps(vt[i], vs[i]);
    }
    for (size_t i = 0; i < 8; i++) {
      vf[i] = _mm256_fmadd_ps(vt[i], vp[i], vs[i]);
    }
    // Force results for x below the denormal cutoff to exact zero.
    for (size_t i = 0; i < 8; i++) {
      vf[i] = _mm256_andnot_ps(_mm256_cmp_ps(vx[i], vdenorm_cutoff, _CMP_LT_OS), vf[i]);
    }

    for (size_t i = 0; i < 8; i++) {
      _mm256_storeu_ps(output + 8 * i, vf[i]);
    }
    output += 64;

    // Even-indexed vectors accumulate into vacc0, odd-indexed into vacc1.
    for (size_t i = 0; i < 8; i += 2) {
      vacc0 = _mm256_add_ps(vacc0, vf[i]);
      vacc1 = _mm256_add_ps(vacc1, vf[i + 1]);
    }
  }
  // Merge the two partial sums.
  vacc0 = _mm256_add_ps(vacc0, vacc1);

  __m256 vacc = vacc0;
  // Secondary loop: one vector of 8 floats at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_sub_ps(_mm256_loadu_ps(input), vi_max);
    input += 8;

    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);

    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

    _mm256_storeu_ps(output, vf);
    output += 8;
    vacc = _mm256_add_ps(vacc, vf);
  }
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Build a lane mask for the 1..7 remaining elements from the params mask table.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));

    const __m256 vx = _mm256_sub_ps(_mm256_maskload_ps(input, vmask), vi_max);

    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);

    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

    // Store the valid lanes in 4-, 2-, then 1-float pieces.
    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vf_lo);
    }
    // Only masked-in lanes may contribute to the sum; zero the rest before adding.
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
  // Horizontal sum of the 8 lanes into a scalar.
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
  _mm256_zeroupper();
}
| 10,402 | 38.256604 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-x64-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for 'batch' bytes of floats and
// stores the sum of all results to *sum (one pass of a numerically-stable
// softmax numerator/denominator computation).
//
// exp() uses the AVX2 "RR1 P5" scheme: one-step range reduction x = n*ln2 + t
// followed by a degree-5 polynomial in t, with the 2^n scale factor
// reconstructed directly in the float exponent field.  The main loop handles
// 64 elements per iteration and spreads the running sum over 4 accumulators
// to shorten the dependent-add chain.
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x64_acc4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum; all other constants are pre-broadcast 8-wide
  // in the params struct.
  const __m256 vi_max = _mm256_broadcast_ss(max);
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
  // Four independent partial sums; combined after the main loop.
  __m256 vacc0 = _mm256_setzero_ps();
  __m256 vacc1 = _mm256_setzero_ps();
  __m256 vacc2 = _mm256_setzero_ps();
  __m256 vacc3 = _mm256_setzero_ps();
  // Main loop: 64 elements (8 vectors of 8 floats) per iteration.
  for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
    const __m256 vi0 = _mm256_loadu_ps(input);
    const __m256 vi1 = _mm256_loadu_ps(input + 8);
    const __m256 vi2 = _mm256_loadu_ps(input + 16);
    const __m256 vi3 = _mm256_loadu_ps(input + 24);
    const __m256 vi4 = _mm256_loadu_ps(input + 32);
    const __m256 vi5 = _mm256_loadu_ps(input + 40);
    const __m256 vi6 = _mm256_loadu_ps(input + 48);
    const __m256 vi7 = _mm256_loadu_ps(input + 56);
    input += 64;
    // Subtract the row maximum so x <= 0 and exp(x) cannot overflow.
    const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
    const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
    const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
    const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
    const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
    const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
    const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
    const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
    // n = round(x * log2(e)) via the magic-bias trick: the large bias forces
    // rounding, leaving the integer n in the low mantissa bits of vn.
    __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
    __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
    __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
    __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
    __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
    __m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
    __m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
    __m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
    // Reconstruct s = 2^n by shifting the integer bits of vn into the float
    // exponent field (23-bit mantissa width).
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
    // Subtract the magic bias to recover n as an ordinary float.
    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
    // t = x - n*ln2: the reduced argument of the polynomial.
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
    // Degree-5 polynomial p(t) = c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))),
    // evaluated with Horner's rule via FMAs.
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
    // Fold the scale into t so the final FMA produces s + (t*s)*p, i.e.
    // f = s * (1 + t*p(t)) = exp(x).
    vt0 = _mm256_mul_ps(vt0, vs0);
    vt1 = _mm256_mul_ps(vt1, vs1);
    vt2 = _mm256_mul_ps(vt2, vs2);
    vt3 = _mm256_mul_ps(vt3, vs3);
    vt4 = _mm256_mul_ps(vt4, vs4);
    vt5 = _mm256_mul_ps(vt5, vs5);
    vt6 = _mm256_mul_ps(vt6, vs6);
    vt7 = _mm256_mul_ps(vt7, vs7);
    __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
    __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
    __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
    __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
    __m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
    __m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
    __m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
    __m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
    // Flush lanes where x < denorm_cutoff to zero: the cmp mask selects those
    // lanes and andnot clears them (exp(x) would underflow to subnormal/zero).
    vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
    vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
    vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
    vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
    vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
    vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
    vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
    vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
    // Store the 64 results and fold them round-robin into the partial sums.
    _mm256_storeu_ps(output, vf0);
    _mm256_storeu_ps(output + 8, vf1);
    _mm256_storeu_ps(output + 16, vf2);
    _mm256_storeu_ps(output + 24, vf3);
    _mm256_storeu_ps(output + 32, vf4);
    _mm256_storeu_ps(output + 40, vf5);
    _mm256_storeu_ps(output + 48, vf6);
    _mm256_storeu_ps(output + 56, vf7);
    output += 64;
    vacc0 = _mm256_add_ps(vacc0, vf0);
    vacc1 = _mm256_add_ps(vacc1, vf1);
    vacc2 = _mm256_add_ps(vacc2, vf2);
    vacc3 = _mm256_add_ps(vacc3, vf3);
    vacc0 = _mm256_add_ps(vacc0, vf4);
    vacc1 = _mm256_add_ps(vacc1, vf5);
    vacc2 = _mm256_add_ps(vacc2, vf6);
    vacc3 = _mm256_add_ps(vacc3, vf7);
  }
  // Collapse the four partial sums into a single accumulator.
  vacc0 = _mm256_add_ps(vacc0, vacc1);
  vacc2 = _mm256_add_ps(vacc2, vacc3);
  vacc0 = _mm256_add_ps(vacc0, vacc2);
  __m256 vacc = vacc0;
  // Remainder loop: one 8-float vector at a time (same math as above).
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m256 vi = _mm256_loadu_ps(input);
    input += 8;
    const __m256 vx = _mm256_sub_ps(vi, vi_max);
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    _mm256_storeu_ps(output, vf);
    output += 8;
    vacc = _mm256_add_ps(vacc, vf);
  }
  // Scalar tail (1..7 floats): masked load, full-width compute, then
  // piecewise 4/2/1-lane stores of the low half.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Lane mask with exactly the remaining batch/sizeof(float) lanes active,
    // read at a byte offset into the shared mask table.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
    const __m256 vi = _mm256_maskload_ps(input, vmask);
    const __m256 vx = _mm256_sub_ps(vi, vi_max);
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vf_lo);
    }
    // Only the active (masked-in) lanes contribute to the sum.
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
  // Horizontal reduction 256 -> 128 -> 64 -> 32 bits; store the scalar sum.
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
  // Clear upper YMM state to avoid AVX/SSE transition penalties in callers.
  _mm256_zeroupper();
}
| 10,556 | 38.245353 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for 'batch' bytes of floats and
// stores the sum of all results to *sum.  Same AVX2 RR1-P5 exp() scheme as
// the acc2/acc4 variants, with a 64-element main loop but only a single sum
// accumulator (longer dependent-add chain, fewer live registers).
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x64(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum; remaining constants are pre-broadcast 8-wide
  // in the params struct.
  const __m256 vi_max = _mm256_broadcast_ss(max);
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
  // Single running sum accumulator.
  __m256 vacc0 = _mm256_setzero_ps();
  // Main loop: 64 elements (8 vectors of 8 floats) per iteration.
  for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
    const __m256 vi0 = _mm256_loadu_ps(input);
    const __m256 vi1 = _mm256_loadu_ps(input + 8);
    const __m256 vi2 = _mm256_loadu_ps(input + 16);
    const __m256 vi3 = _mm256_loadu_ps(input + 24);
    const __m256 vi4 = _mm256_loadu_ps(input + 32);
    const __m256 vi5 = _mm256_loadu_ps(input + 40);
    const __m256 vi6 = _mm256_loadu_ps(input + 48);
    const __m256 vi7 = _mm256_loadu_ps(input + 56);
    input += 64;
    // Subtract the row maximum so x <= 0 and exp(x) cannot overflow.
    const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
    const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
    const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
    const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
    const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
    const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
    const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
    const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
    // n = round(x * log2(e)) via the magic-bias rounding trick.
    __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
    __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
    __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
    __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
    __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
    __m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
    __m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
    __m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
    // Reconstruct s = 2^n by shifting n's integer bits into the exponent field.
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
    // Subtract the magic bias to recover n as an ordinary float.
    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
    // t = x - n*ln2: reduced argument for the polynomial.
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
    // Degree-5 polynomial p(t) evaluated with Horner's rule (c5..c1).
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
    // Fold the scale into t; the final FMA yields f = s*(1 + t*p(t)) = exp(x).
    vt0 = _mm256_mul_ps(vt0, vs0);
    vt1 = _mm256_mul_ps(vt1, vs1);
    vt2 = _mm256_mul_ps(vt2, vs2);
    vt3 = _mm256_mul_ps(vt3, vs3);
    vt4 = _mm256_mul_ps(vt4, vs4);
    vt5 = _mm256_mul_ps(vt5, vs5);
    vt6 = _mm256_mul_ps(vt6, vs6);
    vt7 = _mm256_mul_ps(vt7, vs7);
    __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
    __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
    __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
    __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
    __m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
    __m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
    __m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
    __m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
    // Flush lanes with x < denorm_cutoff to zero (exp(x) would underflow).
    vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
    vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
    vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
    vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
    vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
    vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
    vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
    vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
    // Store the 64 results and fold all of them into the single accumulator.
    _mm256_storeu_ps(output, vf0);
    _mm256_storeu_ps(output + 8, vf1);
    _mm256_storeu_ps(output + 16, vf2);
    _mm256_storeu_ps(output + 24, vf3);
    _mm256_storeu_ps(output + 32, vf4);
    _mm256_storeu_ps(output + 40, vf5);
    _mm256_storeu_ps(output + 48, vf6);
    _mm256_storeu_ps(output + 56, vf7);
    output += 64;
    vacc0 = _mm256_add_ps(vacc0, vf0);
    vacc0 = _mm256_add_ps(vacc0, vf1);
    vacc0 = _mm256_add_ps(vacc0, vf2);
    vacc0 = _mm256_add_ps(vacc0, vf3);
    vacc0 = _mm256_add_ps(vacc0, vf4);
    vacc0 = _mm256_add_ps(vacc0, vf5);
    vacc0 = _mm256_add_ps(vacc0, vf6);
    vacc0 = _mm256_add_ps(vacc0, vf7);
  }
  __m256 vacc = vacc0;
  // Remainder loop: one 8-float vector at a time (same math as above).
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m256 vi = _mm256_loadu_ps(input);
    input += 8;
    const __m256 vx = _mm256_sub_ps(vi, vi_max);
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    _mm256_storeu_ps(output, vf);
    output += 8;
    vacc = _mm256_add_ps(vacc, vf);
  }
  // Scalar tail (1..7 floats): masked load, then piecewise 4/2/1-lane stores.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Lane mask with exactly the remaining lanes active, from the shared table.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
    const __m256 vi = _mm256_maskload_ps(input, vmask);
    const __m256 vx = _mm256_sub_ps(vi, vi_max);
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vf_lo);
    }
    // Only the active (masked-in) lanes contribute to the sum.
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
  // Horizontal reduction 256 -> 128 -> 64 -> 32 bits; store the scalar sum.
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
  // Clear upper YMM state to avoid AVX/SSE transition penalties in callers.
  _mm256_zeroupper();
}
| 10,320 | 38.243346 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-x72-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for 'batch' bytes of floats and
// stores the sum of all results to *sum.  Same AVX2 RR1-P5 exp() scheme as
// the x64 variants, but with a 72-element main loop (9 vectors of 8) and the
// running sum spread over 3 accumulators to shorten the dependent-add chain.
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x72_acc3(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum; remaining constants are pre-broadcast 8-wide
  // in the params struct.
  const __m256 vi_max = _mm256_broadcast_ss(max);
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
  // Three independent partial sums; combined after the main loop.
  __m256 vacc0 = _mm256_setzero_ps();
  __m256 vacc1 = _mm256_setzero_ps();
  __m256 vacc2 = _mm256_setzero_ps();
  // Main loop: 72 elements (9 vectors of 8 floats) per iteration.
  for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
    const __m256 vi0 = _mm256_loadu_ps(input);
    const __m256 vi1 = _mm256_loadu_ps(input + 8);
    const __m256 vi2 = _mm256_loadu_ps(input + 16);
    const __m256 vi3 = _mm256_loadu_ps(input + 24);
    const __m256 vi4 = _mm256_loadu_ps(input + 32);
    const __m256 vi5 = _mm256_loadu_ps(input + 40);
    const __m256 vi6 = _mm256_loadu_ps(input + 48);
    const __m256 vi7 = _mm256_loadu_ps(input + 56);
    const __m256 vi8 = _mm256_loadu_ps(input + 64);
    input += 72;
    // Subtract the row maximum so x <= 0 and exp(x) cannot overflow.
    const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
    const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
    const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
    const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
    const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
    const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
    const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
    const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
    const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
    // n = round(x * log2(e)) via the magic-bias rounding trick.
    __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
    __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
    __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
    __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
    __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
    __m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
    __m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
    __m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
    __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
    // Reconstruct s = 2^n by shifting n's integer bits into the exponent field.
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
    const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    // Subtract the magic bias to recover n as an ordinary float.
    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
    vn7 = _mm256_sub_ps(vn7, vmagic_bias);
    vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    // t = x - n*ln2: reduced argument for the polynomial.
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
    __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vx8);
    // Degree-5 polynomial p(t) evaluated with Horner's rule (c5..c1).
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
    __m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
    // Fold the scale into t; the final FMA yields f = s*(1 + t*p(t)) = exp(x).
    vt0 = _mm256_mul_ps(vt0, vs0);
    vt1 = _mm256_mul_ps(vt1, vs1);
    vt2 = _mm256_mul_ps(vt2, vs2);
    vt3 = _mm256_mul_ps(vt3, vs3);
    vt4 = _mm256_mul_ps(vt4, vs4);
    vt5 = _mm256_mul_ps(vt5, vs5);
    vt6 = _mm256_mul_ps(vt6, vs6);
    vt7 = _mm256_mul_ps(vt7, vs7);
    vt8 = _mm256_mul_ps(vt8, vs8);
    __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
    __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
    __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
    __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
    __m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
    __m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
    __m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
    __m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
    __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
    // Flush lanes with x < denorm_cutoff to zero (exp(x) would underflow).
    vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
    vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
    vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
    vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
    vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
    vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
    vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
    vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
    vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    // Store the 72 results and fold them round-robin into the partial sums.
    _mm256_storeu_ps(output, vf0);
    _mm256_storeu_ps(output + 8, vf1);
    _mm256_storeu_ps(output + 16, vf2);
    _mm256_storeu_ps(output + 24, vf3);
    _mm256_storeu_ps(output + 32, vf4);
    _mm256_storeu_ps(output + 40, vf5);
    _mm256_storeu_ps(output + 48, vf6);
    _mm256_storeu_ps(output + 56, vf7);
    _mm256_storeu_ps(output + 64, vf8);
    output += 72;
    vacc0 = _mm256_add_ps(vacc0, vf0);
    vacc1 = _mm256_add_ps(vacc1, vf1);
    vacc2 = _mm256_add_ps(vacc2, vf2);
    vacc0 = _mm256_add_ps(vacc0, vf3);
    vacc1 = _mm256_add_ps(vacc1, vf4);
    vacc2 = _mm256_add_ps(vacc2, vf5);
    vacc0 = _mm256_add_ps(vacc0, vf6);
    vacc1 = _mm256_add_ps(vacc1, vf7);
    vacc2 = _mm256_add_ps(vacc2, vf8);
  }
  // Collapse the three partial sums into a single accumulator.
  vacc0 = _mm256_add_ps(vacc0, vacc1);
  vacc0 = _mm256_add_ps(vacc0, vacc2);
  __m256 vacc = vacc0;
  // Remainder loop: one 8-float vector at a time (same math as above).
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m256 vi = _mm256_loadu_ps(input);
    input += 8;
    const __m256 vx = _mm256_sub_ps(vi, vi_max);
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    _mm256_storeu_ps(output, vf);
    output += 8;
    vacc = _mm256_add_ps(vacc, vf);
  }
  // Scalar tail (1..7 floats): masked load, then piecewise 4/2/1-lane stores.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Lane mask with exactly the remaining lanes active, from the shared table.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
    const __m256 vi = _mm256_maskload_ps(input, vmask);
    const __m256 vx = _mm256_sub_ps(vi, vi_max);
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vf_lo);
    }
    // Only the active (masked-in) lanes contribute to the sum.
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
  // Horizontal reduction 256 -> 128 -> 64 -> 32 bits; store the scalar sum.
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
  // Clear upper YMM state to avoid AVX/SSE transition penalties in callers.
  _mm256_zeroupper();
}
| 11,253 | 38.907801 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes f[i] = exp(input[i] - *max) for `batch` bytes worth of floats,
// stores each f[i] to `output`, and writes the running sum of all f[i] to
// *sum.  AVX2 variant: range reduction with one FMA (rr1), degree-5
// polynomial (p5), 72 floats (9 vectors) per main-loop iteration.
//
// batch  - number of bytes to process; must be non-zero and a multiple of
//          sizeof(float).
// input  - input floats (unaligned loads are used).
// max    - pointer to the value subtracted from every input (for numerical
//          stability of softmax-style reductions).
// output - receives exp(input[i] - *max).
// sum    - receives the scalar sum of all outputs.
// params - precomputed constants (log2e, magic bias, -ln2, polynomial
//          coefficients, denormal cutoff, tail mask table).
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x72(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const __m256 vi_max = _mm256_broadcast_ss(max);
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);

  __m256 vacc = _mm256_setzero_ps();
  // Main loop: 9 vectors of 8 floats per iteration.  Each vector runs the
  // complete pipeline independently; the per-lane operation sequence and
  // the vf0..vf8 accumulation order match the unrolled form bit-for-bit.
  for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
    for (size_t k = 0; k < 9; k++) {
      const __m256 vx = _mm256_sub_ps(_mm256_loadu_ps(input + 8 * k), vi_max);
      // n := round(x * log2(e)) via the magic-bias trick; the rounded
      // integer ends up in the low mantissa bits of vn.
      __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
      // s := 2**n, reconstructed by shifting n into the exponent field.
      const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
      vn = _mm256_sub_ps(vn, vmagic_bias);
      // t := x - n * ln(2): the reduced argument.
      __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
      // Degree-5 polynomial in Horner form, then f := s + s * t * p(t).
      __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
      vp = _mm256_fmadd_ps(vp, vt, vc3);
      vp = _mm256_fmadd_ps(vp, vt, vc2);
      vp = _mm256_fmadd_ps(vp, vt, vc1);
      vt = _mm256_mul_ps(vt, vs);
      __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
      // Flush outputs to zero where x is below the denormal cutoff.
      vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
      _mm256_storeu_ps(output + 8 * k, vf);
      vacc = _mm256_add_ps(vacc, vf);
    }
    input += 72;
    output += 72;
  }
  // Remainder loop: one full vector of 8 floats at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_sub_ps(_mm256_loadu_ps(input), vi_max);
    input += 8;
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    _mm256_storeu_ps(output, vf);
    output += 8;
    vacc = _mm256_add_ps(vacc, vf);
  }
  // Tail: 1-7 floats via a masked load and piecewise 4/2/1-lane stores.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));

    // mask_table is indexed so that (entry 7 minus `batch` bytes) yields a
    // mask with exactly batch/sizeof(float) active lanes.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
    const __m256 vx = _mm256_sub_ps(_mm256_maskload_ps(input, vmask), vi_max);
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vf_lo);
    }
    // Only the valid (masked-in) lanes contribute to the sum.
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
  // Horizontal reduction of the 8-lane accumulator to a single scalar.
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
  _mm256_zeroupper();
}
| 11,094 | 38.910072 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-x80-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes f[i] = exp(input[i] - *max), stores each f[i] to `output`, and
// writes the scalar sum of all f[i] to *sum.  AVX2 variant: range reduction
// with one FMA (rr1), degree-5 polynomial (p5), 80 floats (10 vectors) per
// main-loop iteration, two alternating partial-sum accumulators (acc2).
//
// batch  - number of bytes to process; non-zero, multiple of sizeof(float).
// input  - input floats (unaligned loads are used).
// max    - value subtracted from every input before exponentiation.
// output - receives exp(input[i] - *max).
// sum    - receives the scalar sum of all outputs.
// params - precomputed constants and the tail mask table.
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x80_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const __m256 vi_max = _mm256_broadcast_ss(max);
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);

  // Two partial sums; even-indexed vectors go to vaccs[0], odd to vaccs[1],
  // exactly reproducing the unrolled vacc0/vacc1 interleaving.
  __m256 vaccs[2] = { _mm256_setzero_ps(), _mm256_setzero_ps() };
  // Main loop: 10 vectors of 8 floats per iteration.  Each vector runs the
  // full pipeline independently; per-lane results are bit-identical to the
  // stage-interleaved unrolled form.
  for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
    for (size_t k = 0; k < 10; k++) {
      const __m256 vx = _mm256_sub_ps(_mm256_loadu_ps(input + 8 * k), vi_max);
      // n := round(x * log2(e)) via the magic-bias trick.
      __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
      // s := 2**n, built by shifting n into the exponent field.
      const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
      vn = _mm256_sub_ps(vn, vmagic_bias);
      // t := x - n * ln(2): the reduced argument.
      __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
      // Degree-5 polynomial in Horner form, then f := s + s * t * p(t).
      __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
      vp = _mm256_fmadd_ps(vp, vt, vc3);
      vp = _mm256_fmadd_ps(vp, vt, vc2);
      vp = _mm256_fmadd_ps(vp, vt, vc1);
      vt = _mm256_mul_ps(vt, vs);
      __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
      // Flush outputs to zero where x is below the denormal cutoff.
      vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
      _mm256_storeu_ps(output + 8 * k, vf);
      vaccs[k % 2] = _mm256_add_ps(vaccs[k % 2], vf);
    }
    input += 80;
    output += 80;
  }
  // Fold the two partial sums together (same order as the unrolled code).
  __m256 vacc = _mm256_add_ps(vaccs[0], vaccs[1]);
  // Remainder loop: one full vector of 8 floats at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_sub_ps(_mm256_loadu_ps(input), vi_max);
    input += 8;
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    _mm256_storeu_ps(output, vf);
    output += 8;
    vacc = _mm256_add_ps(vacc, vf);
  }
  // Tail: 1-7 floats via a masked load and piecewise 4/2/1-lane stores.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));

    // (mask_table entry 7 minus `batch` bytes) selects exactly
    // batch/sizeof(float) active lanes.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
    const __m256 vx = _mm256_sub_ps(_mm256_maskload_ps(input, vmask), vi_max);
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vf_lo);
    }
    // Only the valid (masked-in) lanes contribute to the sum.
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
  // Horizontal reduction of the 8-lane accumulator to a single scalar.
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
  _mm256_zeroupper();
}
| 11,950 | 39.511864 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-x80-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes f[i] = exp(input[i] - *max), stores each f[i] to `output`, and
// writes the scalar sum of all f[i] to *sum.  AVX2 variant: range reduction
// with one FMA (rr1), degree-5 polynomial (p5), 80 floats (10 vectors) per
// main-loop iteration, five rotating partial-sum accumulators (acc5).
//
// batch  - number of bytes to process; non-zero, multiple of sizeof(float).
// input  - input floats (unaligned loads are used).
// max    - value subtracted from every input before exponentiation.
// output - receives exp(input[i] - *max).
// sum    - receives the scalar sum of all outputs.
// params - precomputed constants and the tail mask table.
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x80_acc5(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const __m256 vi_max = _mm256_broadcast_ss(max);
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);

  // Five partial sums; vector k of each iteration is added into
  // vaccs[k % 5], exactly reproducing the unrolled vacc0..vacc4 rotation.
  __m256 vaccs[5] = {
    _mm256_setzero_ps(),
    _mm256_setzero_ps(),
    _mm256_setzero_ps(),
    _mm256_setzero_ps(),
    _mm256_setzero_ps(),
  };
  // Main loop: 10 vectors of 8 floats per iteration.  Each vector runs the
  // full pipeline independently; per-lane results are bit-identical to the
  // stage-interleaved unrolled form.
  for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
    for (size_t k = 0; k < 10; k++) {
      const __m256 vx = _mm256_sub_ps(_mm256_loadu_ps(input + 8 * k), vi_max);
      // n := round(x * log2(e)) via the magic-bias trick.
      __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
      // s := 2**n, built by shifting n into the exponent field.
      const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
      vn = _mm256_sub_ps(vn, vmagic_bias);
      // t := x - n * ln(2): the reduced argument.
      __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
      // Degree-5 polynomial in Horner form, then f := s + s * t * p(t).
      __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
      vp = _mm256_fmadd_ps(vp, vt, vc3);
      vp = _mm256_fmadd_ps(vp, vt, vc2);
      vp = _mm256_fmadd_ps(vp, vt, vc1);
      vt = _mm256_mul_ps(vt, vs);
      __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
      // Flush outputs to zero where x is below the denormal cutoff.
      vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
      _mm256_storeu_ps(output + 8 * k, vf);
      vaccs[k % 5] = _mm256_add_ps(vaccs[k % 5], vf);
    }
    input += 80;
    output += 80;
  }
  // Fold the five partial sums in the same pairwise order as the unrolled
  // code: (0+1), (2+3), (0+2), (0+4).
  vaccs[0] = _mm256_add_ps(vaccs[0], vaccs[1]);
  vaccs[2] = _mm256_add_ps(vaccs[2], vaccs[3]);
  vaccs[0] = _mm256_add_ps(vaccs[0], vaccs[2]);
  __m256 vacc = _mm256_add_ps(vaccs[0], vaccs[4]);
  // Remainder loop: one full vector of 8 floats at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_sub_ps(_mm256_loadu_ps(input), vi_max);
    input += 8;
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    _mm256_storeu_ps(output, vf);
    output += 8;
    vacc = _mm256_add_ps(vacc, vf);
  }
  // Tail: 1-7 floats via a masked load and piecewise 4/2/1-lane stores.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));

    // (mask_table entry 7 minus `batch` bytes) selects exactly
    // batch/sizeof(float) active lanes.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
    const __m256 vx = _mm256_sub_ps(_mm256_maskload_ps(input, vmask), vi_max);
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vf_lo);
    }
    // Only the valid (masked-in) lanes contribute to the sum.
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
  // Horizontal reduction of the 8-lane accumulator to a single scalar.
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
  _mm256_zeroupper();
}
| 12,181 | 39.471761 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every float in the input and
// accumulates the sum of all results into *sum (softmax numerator/denominator).
//
// AVX2 variant: single-step range reduction ("rr1") with a degree-5 polynomial
// ("p5"); the main loop is unrolled to 80 floats (10 vectors of 8 lanes) per
// iteration with a single partial-sum accumulator ("x80").
//
// `batch` is a byte count (must be a non-zero multiple of sizeof(float));
// `max` points to the precomputed maximum of the input, subtracted so that
// the argument to exp() is <= 0 and cannot overflow.
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x80(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);  // batch is measured in bytes, whole floats only
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
// Broadcast the scalar maximum to all 8 lanes.
const __m256 vi_max = _mm256_broadcast_ss(max);
// Range-reduction and polynomial constants from the shared params struct.
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
// Degree-5 polynomial coefficients for exp(t) on the reduced interval.
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
// Inputs below this cutoff would produce denormal results; they are flushed to zero.
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
// Running partial sum of all exp() results.
__m256 vacc0 = _mm256_setzero_ps();
// Main loop: 80 floats (10 x 8-lane vectors) per iteration.
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
const __m256 vi8 = _mm256_loadu_ps(input + 64);
const __m256 vi9 = _mm256_loadu_ps(input + 72);
input += 80;
// x = i - max, so x <= 0 and exp(x) <= 1 (no overflow possible).
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
// n = round(x * log2(e)) via the magic-bias trick: adding the large bias
// forces the rounded integer into the low mantissa bits of vn.
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
// s = 2**n, built by shifting n's mantissa bits into the float exponent field.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
// Undo the magic bias to recover n as an ordinary float.
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
// t = x - n*ln(2): the remainder of the range reduction.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vx9);
// Evaluate the degree-5 polynomial p(t) = c1 + t*(c2 + t*(c3 + t*(c4 + t*c5)))
// by Horner's scheme, one FMA per coefficient.
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
// Reconstruct: f = s + (t*s) * p(t), i.e. 2**n * (1 + t*p(t)) ~= exp(x).
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
// Flush lanes where x < denorm_cutoff to zero (result would be denormal).
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
// Store the exp() results and fold them into the running sum.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
_mm256_storeu_ps(output + 72, vf9);
output += 80;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc0 = _mm256_add_ps(vacc0, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc0 = _mm256_add_ps(vacc0, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc0 = _mm256_add_ps(vacc0, vf7);
vacc0 = _mm256_add_ps(vacc0, vf8);
vacc0 = _mm256_add_ps(vacc0, vf9);
}
__m256 vacc = vacc0;
// Secondary loop: one 8-lane vector at a time (same math as the main loop).
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm256_storeu_ps(output, vf);
output += 8;
vacc = _mm256_add_ps(vacc, vf);
}
// Remainder: 1-7 floats, handled with a masked load and piecewise stores.
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
// Pick a mask with `batch/4` leading all-ones lanes from the shared mask table.
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vi = _mm256_maskload_ps(input, vmask);
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Store 4/2/1 floats according to the bits of the remaining batch size.
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
// Only masked-in lanes contribute to the sum.
vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
}
// Horizontal reduction of the 8-lane accumulator to a scalar sum.
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
_mm_store_ss(sum, vacc_lo);
_mm256_zeroupper();
}
| 11,868 | 39.508532 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-x96-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x96_acc2(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_broadcast_ss(max);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
const __m256 vi8 = _mm256_loadu_ps(input + 64);
const __m256 vi9 = _mm256_loadu_ps(input + 72);
const __m256 vi10 = _mm256_loadu_ps(input + 80);
const __m256 vi11 = _mm256_loadu_ps(input + 88);
input += 96;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
const __m256 vx10 = _mm256_sub_ps(vi10, vi_max);
const __m256 vx11 = _mm256_sub_ps(vi11, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
__m256 vn10 = _mm256_fmadd_ps(vx10, vlog2e, vmagic_bias);
__m256 vn11 = _mm256_fmadd_ps(vx11, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
const __m256 vs10 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn10), 23));
const __m256 vs11 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn11), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
vn10 = _mm256_sub_ps(vn10, vmagic_bias);
vn11 = _mm256_sub_ps(vn11, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vx9);
__m256 vt10 = _mm256_fmadd_ps(vn10, vminus_ln2, vx10);
__m256 vt11 = _mm256_fmadd_ps(vn11, vminus_ln2, vx11);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
__m256 vp10 = _mm256_fmadd_ps(vc5, vt10, vc4);
__m256 vp11 = _mm256_fmadd_ps(vc5, vt11, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc3);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc2);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc1);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
vt10 = _mm256_mul_ps(vt10, vs10);
vt11 = _mm256_mul_ps(vt11, vs11);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
__m256 vf10 = _mm256_fmadd_ps(vt10, vp10, vs10);
__m256 vf11 = _mm256_fmadd_ps(vt11, vp11, vs11);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
vf10 = _mm256_andnot_ps(_mm256_cmp_ps(vx10, vdenorm_cutoff, _CMP_LT_OS), vf10);
vf11 = _mm256_andnot_ps(_mm256_cmp_ps(vx11, vdenorm_cutoff, _CMP_LT_OS), vf11);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
_mm256_storeu_ps(output + 72, vf9);
_mm256_storeu_ps(output + 80, vf10);
_mm256_storeu_ps(output + 88, vf11);
output += 96;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc1 = _mm256_add_ps(vacc1, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc1 = _mm256_add_ps(vacc1, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc1 = _mm256_add_ps(vacc1, vf7);
vacc0 = _mm256_add_ps(vacc0, vf8);
vacc1 = _mm256_add_ps(vacc1, vf9);
vacc0 = _mm256_add_ps(vacc0, vf10);
vacc1 = _mm256_add_ps(vacc1, vf11);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm256_storeu_ps(output, vf);
output += 8;
vacc = _mm256_add_ps(vacc, vf);
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vi = _mm256_maskload_ps(input, vmask);
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
_mm_store_ss(sum, vacc_lo);
_mm256_zeroupper();
}
| 13,568 | 40.750769 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-x96-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x96_acc3(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_broadcast_ss(max);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
const __m256 vi8 = _mm256_loadu_ps(input + 64);
const __m256 vi9 = _mm256_loadu_ps(input + 72);
const __m256 vi10 = _mm256_loadu_ps(input + 80);
const __m256 vi11 = _mm256_loadu_ps(input + 88);
input += 96;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
const __m256 vx10 = _mm256_sub_ps(vi10, vi_max);
const __m256 vx11 = _mm256_sub_ps(vi11, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
__m256 vn10 = _mm256_fmadd_ps(vx10, vlog2e, vmagic_bias);
__m256 vn11 = _mm256_fmadd_ps(vx11, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
const __m256 vs10 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn10), 23));
const __m256 vs11 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn11), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
vn10 = _mm256_sub_ps(vn10, vmagic_bias);
vn11 = _mm256_sub_ps(vn11, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vx9);
__m256 vt10 = _mm256_fmadd_ps(vn10, vminus_ln2, vx10);
__m256 vt11 = _mm256_fmadd_ps(vn11, vminus_ln2, vx11);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
__m256 vp10 = _mm256_fmadd_ps(vc5, vt10, vc4);
__m256 vp11 = _mm256_fmadd_ps(vc5, vt11, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc3);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc2);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc1);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
vt10 = _mm256_mul_ps(vt10, vs10);
vt11 = _mm256_mul_ps(vt11, vs11);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
__m256 vf10 = _mm256_fmadd_ps(vt10, vp10, vs10);
__m256 vf11 = _mm256_fmadd_ps(vt11, vp11, vs11);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
vf10 = _mm256_andnot_ps(_mm256_cmp_ps(vx10, vdenorm_cutoff, _CMP_LT_OS), vf10);
vf11 = _mm256_andnot_ps(_mm256_cmp_ps(vx11, vdenorm_cutoff, _CMP_LT_OS), vf11);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
_mm256_storeu_ps(output + 72, vf9);
_mm256_storeu_ps(output + 80, vf10);
_mm256_storeu_ps(output + 88, vf11);
output += 96;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc2 = _mm256_add_ps(vacc2, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
vacc1 = _mm256_add_ps(vacc1, vf4);
vacc2 = _mm256_add_ps(vacc2, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc1 = _mm256_add_ps(vacc1, vf7);
vacc2 = _mm256_add_ps(vacc2, vf8);
vacc0 = _mm256_add_ps(vacc0, vf9);
vacc1 = _mm256_add_ps(vacc1, vf10);
vacc2 = _mm256_add_ps(vacc2, vf11);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc0 = _mm256_add_ps(vacc0, vacc2);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm256_storeu_ps(output, vf);
output += 8;
vacc = _mm256_add_ps(vacc, vf);
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vi = _mm256_maskload_ps(input, vmask);
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
_mm_store_ss(sum, vacc_lo);
_mm256_zeroupper();
}
| 13,645 | 40.730887 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-x96-acc6.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes f[i] = exp(input[i] - *max) for a batch of floats, stores every f[i]
// to output, and writes the scalar sum of all f[i] to *sum (softmax building
// block). AVX2 variant: 96 elements (12 vectors of 8) per main-loop iteration,
// with six independent partial sums to shorten the FP add dependency chain.
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x96_acc6(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const __m256 vi_max = _mm256_broadcast_ss(max);
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);

  // Six partial sums; tile lane k feeds accumulator k % 6, which reproduces
  // the original unrolled accumulation order exactly.
  __m256 vsums[6];
  for (size_t k = 0; k < 6; k++) {
    vsums[k] = _mm256_setzero_ps();
  }

  for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
    for (size_t k = 0; k < 12; k++) {
      // x = input - max, so exp(x) <= 1 and the running sum cannot overflow.
      const __m256 vx = _mm256_sub_ps(_mm256_loadu_ps(input + 8 * k), vi_max);

      // n = round(x * log2(e)) via the magic-bias trick; 2**n is rebuilt by
      // shifting the integer part of the biased value into the exponent field.
      __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
      const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
      vn = _mm256_sub_ps(vn, vmagic_bias);

      // Reduced argument t = x - n * ln2, then a degree-5 polynomial in t.
      __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
      __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
      vp = _mm256_fmadd_ps(vp, vt, vc3);
      vp = _mm256_fmadd_ps(vp, vt, vc2);
      vp = _mm256_fmadd_ps(vp, vt, vc1);

      // Recombine: f = 2**n * (1 + t*p), computed as s + (t*s) * p.
      vt = _mm256_mul_ps(vt, vs);
      __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

      // Zero out lanes whose input was below the denormal cutoff.
      vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

      _mm256_storeu_ps(output + 8 * k, vf);
      vsums[k % 6] = _mm256_add_ps(vsums[k % 6], vf);
    }
    input += 96;
    output += 96;
  }

  // Pairwise reduction of the six partial sums, in the original order.
  vsums[0] = _mm256_add_ps(vsums[0], vsums[1]);
  vsums[2] = _mm256_add_ps(vsums[2], vsums[3]);
  vsums[4] = _mm256_add_ps(vsums[4], vsums[5]);
  vsums[0] = _mm256_add_ps(vsums[0], vsums[2]);
  __m256 vacc = _mm256_add_ps(vsums[0], vsums[4]);

  // One vector of 8 at a time for the tail.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_sub_ps(_mm256_loadu_ps(input), vi_max);
    input += 8;
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    _mm256_storeu_ps(output, vf);
    output += 8;
    vacc = _mm256_add_ps(vacc, vf);
  }

  // Final 1..7 elements: masked load, scalar-granular stores, masked sum.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
    const __m256 vx = _mm256_sub_ps(_mm256_maskload_ps(input, vmask), vi_max);
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    __m128 vpart = _mm256_castps256_ps128(vf);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vpart);
      vpart = _mm256_extractf128_ps(vf, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vpart);
      vpart = _mm_movehl_ps(vpart, vpart);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vpart);
    }
    // Only the valid (masked) lanes contribute to the sum.
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }

  // Horizontal reduction of the 8 accumulator lanes to a single scalar.
  __m128 vhalf = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vhalf = _mm_add_ps(vhalf, _mm_movehl_ps(vhalf, vhalf));
  vhalf = _mm_add_ss(vhalf, _mm_movehdup_ps(vhalf));
  _mm_store_ss(sum, vhalf);
  _mm256_zeroupper();
}
| 13,876 | 40.672673 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes f[i] = exp(input[i] - *max) for a batch of floats, stores every f[i]
// to output, and writes the scalar sum of all f[i] to *sum (softmax building
// block). AVX2 variant: 96 elements (12 vectors of 8) per main-loop iteration,
// accumulated into a single running vector sum.
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x96(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const __m256 vi_max = _mm256_broadcast_ss(max);
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);

  __m256 vacc = _mm256_setzero_ps();
  for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
    // Tile lanes are processed k = 0..11 so results are summed into vacc in
    // the same order as the original unrolled code.
    for (size_t k = 0; k < 12; k++) {
      // x = input - max, so exp(x) <= 1 and the running sum cannot overflow.
      const __m256 vx = _mm256_sub_ps(_mm256_loadu_ps(input + 8 * k), vi_max);

      // n = round(x * log2(e)) via the magic-bias trick; 2**n is rebuilt by
      // shifting the integer part of the biased value into the exponent field.
      __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
      const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
      vn = _mm256_sub_ps(vn, vmagic_bias);

      // Reduced argument t = x - n * ln2, then a degree-5 polynomial in t.
      __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
      __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
      vp = _mm256_fmadd_ps(vp, vt, vc3);
      vp = _mm256_fmadd_ps(vp, vt, vc2);
      vp = _mm256_fmadd_ps(vp, vt, vc1);

      // Recombine: f = 2**n * (1 + t*p), computed as s + (t*s) * p.
      vt = _mm256_mul_ps(vt, vs);
      __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

      // Zero out lanes whose input was below the denormal cutoff.
      vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

      _mm256_storeu_ps(output + 8 * k, vf);
      vacc = _mm256_add_ps(vacc, vf);
    }
    input += 96;
    output += 96;
  }

  // One vector of 8 at a time for the tail.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_sub_ps(_mm256_loadu_ps(input), vi_max);
    input += 8;
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    _mm256_storeu_ps(output, vf);
    output += 8;
    vacc = _mm256_add_ps(vacc, vf);
  }

  // Final 1..7 elements: masked load, scalar-granular stores, masked sum.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
    const __m256 vx = _mm256_sub_ps(_mm256_maskload_ps(input, vmask), vi_max);
    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
    __m128 vpart = _mm256_castps256_ps128(vf);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vpart);
      vpart = _mm256_extractf128_ps(vf, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vpart);
      vpart = _mm_movehl_ps(vpart, vpart);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vpart);
    }
    // Only the valid (masked) lanes contribute to the sum.
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }

  // Horizontal reduction of the 8 accumulator lanes to a single scalar.
  __m128 vhalf = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vhalf = _mm_add_ps(vhalf, _mm_movehl_ps(vhalf, vhalf));
  vhalf = _mm_add_ss(vhalf, _mm_movehdup_ps(vhalf));
  _mm_store_ss(sum, vhalf);
  _mm256_zeroupper();
}
| 13,486 | 40.755418 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-x128-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x128_acc2(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m512 vi_max = _mm512_set1_ps(*max);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);
__m512 vacc0 = _mm512_setzero_ps();
__m512 vacc1 = _mm512_setzero_ps();
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
const __m512 vi5 = _mm512_loadu_ps(input + 80);
const __m512 vi6 = _mm512_loadu_ps(input + 96);
const __m512 vi7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vx4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vx5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vx6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vx7);
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
output += 128;
vacc0 = _mm512_add_ps(vacc0, vf0);
vacc1 = _mm512_add_ps(vacc1, vf1);
vacc0 = _mm512_add_ps(vacc0, vf2);
vacc1 = _mm512_add_ps(vacc1, vf3);
vacc0 = _mm512_add_ps(vacc0, vf4);
vacc1 = _mm512_add_ps(vacc1, vf5);
vacc0 = _mm512_add_ps(vacc0, vf6);
vacc1 = _mm512_add_ps(vacc1, vf7);
}
vacc0 = _mm512_add_ps(vacc0, vacc1);
__m512 vacc = vacc0;
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
const __m512 vx = _mm512_sub_ps(vi, vi_max);
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
const __m512 vf = _mm512_scalef_ps(vp, vn);
_mm512_storeu_ps(output, vf);
output += 16;
vacc = _mm512_add_ps(vacc, vf);
}
if (batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vx = _mm512_sub_ps(vi, vi_max);
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
const __m512 vf = _mm512_scalef_ps(vp, vn);
_mm512_mask_storeu_ps(output, vmask, vf);
vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
}
*sum = _mm512_reduce_add_ps(vacc);
}
| 7,909 | 36.84689 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-x128-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Softmax numerator pass: writes output[i] = exp(input[i] - *max) for every
// element and stores the sum of all those exponentials to *sum.
// AVX512F kernel using single-step range reduction (RR1), a degree-5
// polynomial approximation of exp() on the reduced range, and
// _mm512_scalef_ps for reconstruction.  The main loop processes 128 floats
// per iteration and keeps 4 independent sum accumulators to shorten the
// add-reduction dependency chain.
void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x128_acc4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the precomputed input maximum; subtracting it keeps every
  // exp() argument <= 0, which prevents overflow.
  const __m512 vi_max = _mm512_set1_ps(*max);
  // Range-reduction constants and polynomial coefficients.
  const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
  const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
  const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
  const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
  const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
  const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
  const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
  const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);

  // Four partial sums, filled round-robin below.
  __m512 vacc0 = _mm512_setzero_ps();
  __m512 vacc1 = _mm512_setzero_ps();
  __m512 vacc2 = _mm512_setzero_ps();
  __m512 vacc3 = _mm512_setzero_ps();
  // Main loop: 8 vectors of 16 floats = 128 elements per iteration.
  for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
    const __m512 vi0 = _mm512_loadu_ps(input);
    const __m512 vi1 = _mm512_loadu_ps(input + 16);
    const __m512 vi2 = _mm512_loadu_ps(input + 32);
    const __m512 vi3 = _mm512_loadu_ps(input + 48);
    const __m512 vi4 = _mm512_loadu_ps(input + 64);
    const __m512 vi5 = _mm512_loadu_ps(input + 80);
    const __m512 vi6 = _mm512_loadu_ps(input + 96);
    const __m512 vi7 = _mm512_loadu_ps(input + 112);
    input += 128;

    // x = i - max (non-positive).
    const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
    const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
    const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
    const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
    const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
    const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);

    // n = round-to-nearest(x * log2(e)): the power-of-two exponent used in
    // the final scalef reconstruction (roundscale imm 0 = rint).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);

    // t = x - n * ln(2): reduced argument for the polynomial.
    const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
    const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
    const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
    const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
    const __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vx4);
    const __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vx5);
    const __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vx6);
    const __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vx7);

    // Degree-5 polynomial p(t) ~= exp(t), evaluated by Horner's scheme.
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);

    // Reconstruct exp(x) = p(t) * 2**n without materializing 2**n.
    const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
    const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
    const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
    const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);

    _mm512_storeu_ps(output, vf0);
    _mm512_storeu_ps(output + 16, vf1);
    _mm512_storeu_ps(output + 32, vf2);
    _mm512_storeu_ps(output + 48, vf3);
    _mm512_storeu_ps(output + 64, vf4);
    _mm512_storeu_ps(output + 80, vf5);
    _mm512_storeu_ps(output + 96, vf6);
    _mm512_storeu_ps(output + 112, vf7);
    output += 128;

    // Round-robin accumulation across the 4 partial sums.
    vacc0 = _mm512_add_ps(vacc0, vf0);
    vacc1 = _mm512_add_ps(vacc1, vf1);
    vacc2 = _mm512_add_ps(vacc2, vf2);
    vacc3 = _mm512_add_ps(vacc3, vf3);
    vacc0 = _mm512_add_ps(vacc0, vf4);
    vacc1 = _mm512_add_ps(vacc1, vf5);
    vacc2 = _mm512_add_ps(vacc2, vf6);
    vacc3 = _mm512_add_ps(vacc3, vf7);
  }
  // Collapse the partial accumulators into one.
  vacc0 = _mm512_add_ps(vacc0, vacc1);
  vacc2 = _mm512_add_ps(vacc2, vacc3);
  vacc0 = _mm512_add_ps(vacc0, vacc2);

  __m512 vacc = vacc0;
  // Tail: whole 16-float vectors.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m512 vi = _mm512_loadu_ps(input);
    input += 16;

    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    const __m512 vf = _mm512_scalef_ps(vp, vn);

    _mm512_storeu_ps(output, vf);
    output += 16;

    vacc = _mm512_add_ps(vacc, vf);
  }
  // Final remainder of 1..15 floats, handled with a lane mask so only valid
  // elements are loaded, stored, and accumulated.
  if (batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);

    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    const __m512 vf = _mm512_scalef_ps(vp, vn);

    _mm512_mask_storeu_ps(output, vmask, vf);

    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
  }
  // Horizontal sum across the 16 lanes.
  *sum = _mm512_reduce_add_ps(vacc);
}
| 8,063 | 36.859155 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-x128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Softmax numerator pass: writes output[i] = exp(input[i] - *max) for every
// element and stores the sum of all those exponentials to *sum.
// AVX512F kernel using single-step range reduction (RR1), a degree-5
// polynomial approximation of exp() on the reduced range, and
// _mm512_scalef_ps for reconstruction.  The main loop processes 128 floats
// per iteration with a single sum accumulator.
void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x128(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the precomputed input maximum; subtracting it keeps every
  // exp() argument <= 0, which prevents overflow.
  const __m512 vi_max = _mm512_set1_ps(*max);
  // Range-reduction constants and polynomial coefficients.
  const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
  const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
  const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
  const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
  const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
  const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
  const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
  const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);

  __m512 vacc0 = _mm512_setzero_ps();
  // Main loop: 8 vectors of 16 floats = 128 elements per iteration.
  for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
    const __m512 vi0 = _mm512_loadu_ps(input);
    const __m512 vi1 = _mm512_loadu_ps(input + 16);
    const __m512 vi2 = _mm512_loadu_ps(input + 32);
    const __m512 vi3 = _mm512_loadu_ps(input + 48);
    const __m512 vi4 = _mm512_loadu_ps(input + 64);
    const __m512 vi5 = _mm512_loadu_ps(input + 80);
    const __m512 vi6 = _mm512_loadu_ps(input + 96);
    const __m512 vi7 = _mm512_loadu_ps(input + 112);
    input += 128;

    // x = i - max (non-positive).
    const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
    const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
    const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
    const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
    const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
    const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);

    // n = round-to-nearest(x * log2(e)): the power-of-two exponent used in
    // the final scalef reconstruction (roundscale imm 0 = rint).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);

    // t = x - n * ln(2): reduced argument for the polynomial.
    const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
    const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
    const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
    const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
    const __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vx4);
    const __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vx5);
    const __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vx6);
    const __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vx7);

    // Degree-5 polynomial p(t) ~= exp(t), evaluated by Horner's scheme.
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);

    // Reconstruct exp(x) = p(t) * 2**n without materializing 2**n.
    const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
    const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
    const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
    const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);

    _mm512_storeu_ps(output, vf0);
    _mm512_storeu_ps(output + 16, vf1);
    _mm512_storeu_ps(output + 32, vf2);
    _mm512_storeu_ps(output + 48, vf3);
    _mm512_storeu_ps(output + 64, vf4);
    _mm512_storeu_ps(output + 80, vf5);
    _mm512_storeu_ps(output + 96, vf6);
    _mm512_storeu_ps(output + 112, vf7);
    output += 128;

    // Single accumulator variant: all 8 partial results feed vacc0.
    vacc0 = _mm512_add_ps(vacc0, vf0);
    vacc0 = _mm512_add_ps(vacc0, vf1);
    vacc0 = _mm512_add_ps(vacc0, vf2);
    vacc0 = _mm512_add_ps(vacc0, vf3);
    vacc0 = _mm512_add_ps(vacc0, vf4);
    vacc0 = _mm512_add_ps(vacc0, vf5);
    vacc0 = _mm512_add_ps(vacc0, vf6);
    vacc0 = _mm512_add_ps(vacc0, vf7);
  }

  __m512 vacc = vacc0;
  // Tail: whole 16-float vectors.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m512 vi = _mm512_loadu_ps(input);
    input += 16;

    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    const __m512 vf = _mm512_scalef_ps(vp, vn);

    _mm512_storeu_ps(output, vf);
    output += 16;

    vacc = _mm512_add_ps(vacc, vf);
  }
  // Final remainder of 1..15 floats, handled with a lane mask so only valid
  // elements are loaded, stored, and accumulated.
  if (batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);

    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    const __m512 vf = _mm512_scalef_ps(vp, vn);

    _mm512_mask_storeu_ps(output, vmask, vf);

    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
  }
  // Horizontal sum across the 16 lanes.
  *sum = _mm512_reduce_add_ps(vacc);
}
| 7,827 | 36.816425 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-x144-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Softmax numerator pass: writes output[i] = exp(input[i] - *max) for every
// element and stores the sum of all those exponentials to *sum.
// AVX512F kernel using single-step range reduction (RR1), a degree-5
// polynomial approximation of exp() on the reduced range, and
// _mm512_scalef_ps for reconstruction.  The main loop processes 144 floats
// per iteration and keeps 3 independent sum accumulators to shorten the
// add-reduction dependency chain.
void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x144_acc3(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the precomputed input maximum; subtracting it keeps every
  // exp() argument <= 0, which prevents overflow.
  const __m512 vi_max = _mm512_set1_ps(*max);
  // Range-reduction constants and polynomial coefficients.
  const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
  const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
  const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
  const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
  const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
  const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
  const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
  const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);

  // Three partial sums, filled round-robin below.
  __m512 vacc0 = _mm512_setzero_ps();
  __m512 vacc1 = _mm512_setzero_ps();
  __m512 vacc2 = _mm512_setzero_ps();
  // Main loop: 9 vectors of 16 floats = 144 elements per iteration.
  for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
    const __m512 vi0 = _mm512_loadu_ps(input);
    const __m512 vi1 = _mm512_loadu_ps(input + 16);
    const __m512 vi2 = _mm512_loadu_ps(input + 32);
    const __m512 vi3 = _mm512_loadu_ps(input + 48);
    const __m512 vi4 = _mm512_loadu_ps(input + 64);
    const __m512 vi5 = _mm512_loadu_ps(input + 80);
    const __m512 vi6 = _mm512_loadu_ps(input + 96);
    const __m512 vi7 = _mm512_loadu_ps(input + 112);
    const __m512 vi8 = _mm512_loadu_ps(input + 128);
    input += 144;

    // x = i - max (non-positive).
    const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
    const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
    const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
    const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
    const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
    const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
    const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);

    // n = round-to-nearest(x * log2(e)): the power-of-two exponent used in
    // the final scalef reconstruction (roundscale imm 0 = rint).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);

    // t = x - n * ln(2): reduced argument for the polynomial.
    const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
    const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
    const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
    const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
    const __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vx4);
    const __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vx5);
    const __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vx6);
    const __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vx7);
    const __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2, vx8);

    // Degree-5 polynomial p(t) ~= exp(t), evaluated by Horner's scheme.
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);

    // Reconstruct exp(x) = p(t) * 2**n without materializing 2**n.
    const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
    const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
    const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
    const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
    const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);

    _mm512_storeu_ps(output, vf0);
    _mm512_storeu_ps(output + 16, vf1);
    _mm512_storeu_ps(output + 32, vf2);
    _mm512_storeu_ps(output + 48, vf3);
    _mm512_storeu_ps(output + 64, vf4);
    _mm512_storeu_ps(output + 80, vf5);
    _mm512_storeu_ps(output + 96, vf6);
    _mm512_storeu_ps(output + 112, vf7);
    _mm512_storeu_ps(output + 128, vf8);
    output += 144;

    // Round-robin accumulation across the 3 partial sums.
    vacc0 = _mm512_add_ps(vacc0, vf0);
    vacc1 = _mm512_add_ps(vacc1, vf1);
    vacc2 = _mm512_add_ps(vacc2, vf2);
    vacc0 = _mm512_add_ps(vacc0, vf3);
    vacc1 = _mm512_add_ps(vacc1, vf4);
    vacc2 = _mm512_add_ps(vacc2, vf5);
    vacc0 = _mm512_add_ps(vacc0, vf6);
    vacc1 = _mm512_add_ps(vacc1, vf7);
    vacc2 = _mm512_add_ps(vacc2, vf8);
  }
  // Collapse the partial accumulators into one.
  vacc0 = _mm512_add_ps(vacc0, vacc1);
  vacc0 = _mm512_add_ps(vacc0, vacc2);

  __m512 vacc = vacc0;
  // Tail: whole 16-float vectors.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m512 vi = _mm512_loadu_ps(input);
    input += 16;

    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    const __m512 vf = _mm512_scalef_ps(vp, vn);

    _mm512_storeu_ps(output, vf);
    output += 16;

    vacc = _mm512_add_ps(vacc, vf);
  }
  // Final remainder of 1..15 floats, handled with a lane mask so only valid
  // elements are loaded, stored, and accumulated.
  if (batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);

    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    const __m512 vf = _mm512_scalef_ps(vp, vn);

    _mm512_mask_storeu_ps(output, vmask, vf);

    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
  }
  // Horizontal sum across the 16 lanes.
  *sum = _mm512_reduce_add_ps(vacc);
}
| 8,576 | 37.461883 | 105 |
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.