repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string, 2 classes)
---|---|---|---|---|---|---
XNNPACK
|
XNNPACK-master/src/f16-vmulcaddc/gen/f16-vmulcaddc-c16-minmax-neonfp16arith-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vmulcaddc/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
void xnn_f16_vmulcaddc_minmax_ukernel_c16__neonfp16arith_2x(
size_t rows,
size_t channels,
const void* restrict input,
size_t input_stride,
const void* restrict weights,
void* restrict output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(uint16_t) == 0);
const uint16_t* i0 = (const uint16_t*) input;
uint16_t* o0 = (uint16_t*) output;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
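// Per row r and channel c: output[r][c] = clamp(scale[c] * input[r][c] + bias[c], min, max),
// processing two rows per pass from the packed weights buffer.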
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const uint16_t* w = (const uint16_t*) weights;
size_t c = channels;
for (; c >= 16 * sizeof(uint16_t); c -= 16 * sizeof(uint16_t)) {
const float16x8_t vscale01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
const float16x8_t vscale89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
float16x8_t vacc0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
float16x8_t vacc1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
float16x8_t vacc1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vbias01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
const float16x8_t vbias89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
vacc0x01234567 = vfmaq_f16(vbias01234567, vscale01234567, vacc0x01234567);
vacc0x89ABCDEF = vfmaq_f16(vbias89ABCDEF, vscale89ABCDEF, vacc0x89ABCDEF);
vacc1x01234567 = vfmaq_f16(vbias01234567, vscale01234567, vacc1x01234567);
vacc1x89ABCDEF = vfmaq_f16(vbias89ABCDEF, vscale89ABCDEF, vacc1x89ABCDEF);
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc1x89ABCDEF = vmaxq_f16(vacc1x89ABCDEF, vmin);
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc1x89ABCDEF = vminq_f16(vacc1x89ABCDEF, vmax);
vst1q_u16(o0, vreinterpretq_u16_f16(vacc0x01234567)); o0 += 8;
vst1q_u16(o0, vreinterpretq_u16_f16(vacc0x89ABCDEF)); o0 += 8;
vst1q_u16(o1, vreinterpretq_u16_f16(vacc1x01234567)); o1 += 8;
vst1q_u16(o1, vreinterpretq_u16_f16(vacc1x89ABCDEF)); o1 += 8;
}
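// Remainder: weights stay packed in 16-channel groups (16 scales followed by 16 biases),
// so each bias sits 16 elements after its scale even though w advances by only 8 here.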
for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
const float16x8_t vscale01234567 = vreinterpretq_f16_u16(vld1q_u16(w));
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
float16x8_t vacc1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vbias01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 16));
w += 8;
vacc0x01234567 = vfmaq_f16(vbias01234567, vscale01234567, vacc0x01234567);
vacc1x01234567 = vfmaq_f16(vbias01234567, vscale01234567, vacc1x01234567);
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vst1q_u16(o0, vreinterpretq_u16_f16(vacc0x01234567)); o0 += 8;
vst1q_u16(o1, vreinterpretq_u16_f16(vacc1x01234567)); o1 += 8;
}
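// Final 1-7 channels: load a full 8-lane vector (XNN_OOB_READS permits reading past the end)
// and store only the valid lanes in 4/2/1-element pieces.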
if XNN_UNLIKELY(c != 0) {
const float16x8_t vscale01234567 = vreinterpretq_f16_u16(vld1q_u16(w));
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 = (const uint16_t*) ((uintptr_t) i0 + c);
float16x8_t vacc1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 = (const uint16_t*) ((uintptr_t) i1 + c);
const float16x8_t vbias01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 16));
vacc0x01234567 = vfmaq_f16(vbias01234567, vscale01234567, vacc0x01234567);
vacc1x01234567 = vfmaq_f16(vbias01234567, vscale01234567, vacc1x01234567);
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
if (c & (4 * sizeof(uint16_t))) {
vst1_u16(o0, vreinterpret_u16_f16(vacc0x0123)); o0 += 4;
vst1_u16(o1, vreinterpret_u16_f16(vacc1x0123)); o1 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
}
if (c & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vacc0x0123), 0); o0 += 2;
vst1_lane_u32((void*) o1, vreinterpret_u32_f16(vacc1x0123), 0); o1 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
}
if (c & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o0, vreinterpret_u16_f16(vacc0x0123), 0); o0 += 1;
vst1_lane_u16(o1, vreinterpret_u16_f16(vacc1x0123), 0); o1 += 1;
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
o0 = (uint16_t*) ((uintptr_t) o0 + output_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
o1 = (uint16_t*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 6,281 | 41.445946 | 117 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vmulcaddc/gen/f16-vmulcaddc-c8-minmax-fma3-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vmulcaddc/fma3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
void xnn_f16_vmulcaddc_minmax_ukernel_c8__fma3_2x(
size_t rows,
size_t channels,
const void* restrict input,
size_t input_stride,
const void* restrict weights,
void* restrict output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(uint16_t) == 0);
const uint16_t* i0 = (const uint16_t*) input;
uint16_t* o0 = (uint16_t*) output;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
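// Same scale*x + bias with clamping as above, but computed in fp32: F16C widens 8 half floats,
// FMA3 performs the fused multiply-add, and results are narrowed back to fp16 with
// round-to-nearest on store.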
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const uint16_t* w = (const uint16_t*) weights;
size_t c = channels;
for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
const __m256 vscale = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
__m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
__m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vbias = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 8)));
w += 16;
vacc0 = _mm256_fmadd_ps(vacc0, vscale, vbias);
vacc1 = _mm256_fmadd_ps(vacc1, vscale, vbias);
vacc0 = _mm256_max_ps(vacc0, vmin);
vacc1 = _mm256_max_ps(vacc1, vmin);
vacc0 = _mm256_min_ps(vacc0, vmax);
vacc1 = _mm256_min_ps(vacc1, vmax);
_mm_storeu_si128((__m128i*) o0, _mm256_cvtps_ph(vacc0, _MM_FROUND_TO_NEAREST_INT));
o0 += 8;
_mm_storeu_si128((__m128i*) o1, _mm256_cvtps_ph(vacc1, _MM_FROUND_TO_NEAREST_INT));
o1 += 8;
}
if XNN_UNLIKELY(c != 0) {
const __m256 vscale = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
__m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 = (const uint16_t*) ((uintptr_t) i0 + c);
__m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 = (const uint16_t*) ((uintptr_t) i1 + c);
const __m256 vbias = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 8)));
vacc0 = _mm256_fmadd_ps(vacc0, vscale, vbias);
vacc1 = _mm256_fmadd_ps(vacc1, vscale, vbias);
vacc0 = _mm256_max_ps(vacc0, vmin);
vacc1 = _mm256_max_ps(vacc1, vmin);
vacc0 = _mm256_min_ps(vacc0, vmax);
vacc1 = _mm256_min_ps(vacc1, vmax);
__m128i vh0 = _mm256_cvtps_ph(vacc0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vacc1, _MM_FROUND_TO_NEAREST_INT);
if (c & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o0, vh0);
_mm_storel_epi64((__m128i*) o1, vh1);
vh0 = _mm_unpackhi_epi64(vh0, vh0);
vh1 = _mm_unpackhi_epi64(vh1, vh1);
o0 += 4;
o1 += 4;
}
if (c & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o0, vh0);
_mm_storeu_si32(o1, vh1);
vh0 = _mm_srli_epi64(vh0, 32);
vh1 = _mm_srli_epi64(vh1, 32);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(uint16_t))) {
*o0 = (uint16_t) _mm_extract_epi16(vh0, 0);
*o1 = (uint16_t) _mm_extract_epi16(vh1, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
o0 = (uint16_t*) ((uintptr_t) o0 + output_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
o1 = (uint16_t*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 4,238 | 30.87218 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vmulcaddc/gen/f16-vmulcaddc-c8-minmax-neonfp16arith-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vmulcaddc/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
void xnn_f16_vmulcaddc_minmax_ukernel_c8__neonfp16arith_2x(
size_t rows,
size_t channels,
const void* restrict input,
size_t input_stride,
const void* restrict weights,
void* restrict output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(uint16_t) == 0);
const uint16_t* i0 = (const uint16_t*) input;
uint16_t* o0 = (uint16_t*) output;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const uint16_t* w = (const uint16_t*) weights;
size_t c = channels;
for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
const float16x8_t vscale01234567 = vreinterpretq_f16_u16(vld1q_u16(w));
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
float16x8_t vacc1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vbias01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 8));
w += 16;
vacc0x01234567 = vfmaq_f16(vbias01234567, vscale01234567, vacc0x01234567);
vacc1x01234567 = vfmaq_f16(vbias01234567, vscale01234567, vacc1x01234567);
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vst1q_u16(o0, vreinterpretq_u16_f16(vacc0x01234567)); o0 += 8;
vst1q_u16(o1, vreinterpretq_u16_f16(vacc1x01234567)); o1 += 8;
}
if XNN_UNLIKELY(c != 0) {
const float16x8_t vscale01234567 = vreinterpretq_f16_u16(vld1q_u16(w));
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 = (const uint16_t*) ((uintptr_t) i0 + c);
float16x8_t vacc1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 = (const uint16_t*) ((uintptr_t) i1 + c);
const float16x8_t vbias01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 8));
vacc0x01234567 = vfmaq_f16(vbias01234567, vscale01234567, vacc0x01234567);
vacc1x01234567 = vfmaq_f16(vbias01234567, vscale01234567, vacc1x01234567);
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
if (c & (4 * sizeof(uint16_t))) {
vst1_u16(o0, vreinterpret_u16_f16(vacc0x0123)); o0 += 4;
vst1_u16(o1, vreinterpret_u16_f16(vacc1x0123)); o1 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
}
if (c & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vacc0x0123), 0); o0 += 2;
vst1_lane_u32((void*) o1, vreinterpret_u32_f16(vacc1x0123), 0); o1 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
}
if (c & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o0, vreinterpret_u16_f16(vacc0x0123), 0); o0 += 1;
vst1_lane_u16(o1, vreinterpret_u16_f16(vacc1x0123), 0); o1 += 1;
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
o0 = (uint16_t*) ((uintptr_t) o0 + output_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
o1 = (uint16_t*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 4,480 | 37.62931 | 117 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndd-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndd_ukernel__f16c_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
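// Floor: each block of 8 fp16 values is widened to fp32, rounded toward negative infinity,
// and narrowed back to fp16.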
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
__m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
__m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
i += 16;
vacc0 = _mm256_round_ps(vacc0, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
vacc1 = _mm256_round_ps(vacc1, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vacc1, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
__m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
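// Store the remaining 1-7 halfwords in 4-, 2-, then 1-element pieces, shifting consumed lanes out of vh.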
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,484 | 32.133333 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndd-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndd_ukernel__f16c_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
__m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 1,870 | 28.698413 | 85 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndd-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndd_ukernel__neonfp16arith_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
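// vrndmq_f16 rounds each fp16 lane toward negative infinity (floor).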
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
float16x8_t vacc0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vacc1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vrndmq_f16(vacc0);
vacc1 = vrndmq_f16(vacc1);
vst1q_u16(o, vreinterpretq_u16_f16(vacc0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vacc1)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vrndmq_f16(vacc);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
vacc = vrndmq_f16(vacc);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 2,014 | 30.484375 | 86 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndd-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndd_ukernel__neonfp16arith_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vrndmq_f16(vacc);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
vacc = vrndmq_f16(vacc);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 1,620 | 29.018519 | 86 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndne-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndne_ukernel__f16c_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
__m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
__m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
i += 16;
vacc0 = _mm256_round_ps(vacc0, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
vacc1 = _mm256_round_ps(vacc1, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vacc1, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
__m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,501 | 32.36 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndne-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndne_ukernel__f16c_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
__m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 1,879 | 28.84127 | 88 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndne-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndne_ukernel__neonfp16arith_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
float16x8_t vacc0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vacc1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vrndnq_f16(vacc0);
vacc1 = vrndnq_f16(vacc1);
vst1q_u16(o, vreinterpretq_u16_f16(vacc0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vacc1)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vrndnq_f16(vacc);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
vacc = vrndnq_f16(vacc);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 2,015 | 30.5 | 86 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndne-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndne_ukernel__neonfp16arith_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vrndnq_f16(vacc);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
vacc = vrndnq_f16(vacc);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 1,621 | 29.037037 | 86 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndu-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndu_ukernel__f16c_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
__m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
__m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
i += 16;
vacc0 = _mm256_round_ps(vacc0, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
vacc1 = _mm256_round_ps(vacc1, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vacc1, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
__m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,484 | 32.133333 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndu-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndu_ukernel__f16c_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
__m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 1,870 | 28.698413 | 85 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndu-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndu_ukernel__neonfp16arith_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
float16x8_t vacc0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vacc1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vrndpq_f16(vacc0);
vacc1 = vrndpq_f16(vacc1);
vst1q_u16(o, vreinterpretq_u16_f16(vacc0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vacc1)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vrndpq_f16(vacc);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
vacc = vrndpq_f16(vacc);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 2,014 | 30.484375 | 86 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndu-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndu_ukernel__neonfp16arith_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vrndpq_f16(vacc);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
vacc = vrndpq_f16(vacc);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 1,620 | 29.018519 | 86 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndz-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndz_ukernel__f16c_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
__m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
__m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
i += 16;
vacc0 = _mm256_round_ps(vacc0, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
vacc1 = _mm256_round_ps(vacc1, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vacc1, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
__m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,472 | 31.973333 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndz-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndz_ukernel__f16c_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
vacc = _mm256_round_ps(vacc, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
__m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 1,864 | 28.603175 | 85 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndz-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndz_ukernel__neonfp16arith_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
float16x8_t vacc0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vacc1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vrndq_f16(vacc0);
vacc1 = vrndq_f16(vacc1);
vst1q_u16(o, vreinterpretq_u16_f16(vacc0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vacc1)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vrndq_f16(vacc);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
vacc = vrndq_f16(vacc);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 2,010 | 30.421875 | 86 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vrnd/gen/f16-vrndz-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vrnd/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f16_vrndz_ukernel__neonfp16arith_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vrndq_f16(vacc);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
vacc = vrndq_f16(vacc);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 1,618 | 28.981481 | 86 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-aarch64-neonfp16arith-rr2-p2-div-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__aarch64_neonfp16arith_rr2_p2_div_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
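// Sketch of the rr2_p2 scheme used in every loop below, with z = |x|:
//   n = round(-z / ln(2))                 (magic-bias trick; n lands in the low bits of vn)
//   s = 2^n                               (bits of n shifted into the fp16 exponent field)
//   t = z + n * ln(2)                     (two-step hi/lo reduction of the argument)
//   e = s * (1 + t * (c1 + c2 * t)) ~= exp(-z)
//   f = e / (1 + e) = sigmoid(-z)
// f is then flushed to zero where exp(-|x|) underflows, and the result is f for x < 0
// or 1 - f otherwise.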
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
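// Shifting left by 10 moves the integer part of vn into the fp16 exponent field, reconstructing s = 2^n.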
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
float16x8_t vf0 = vdivq_f16(ve0, vd0);
float16x8_t vf1 = vdivq_f16(ve1, vd1);
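// f = e / (1 + e) = sigmoid(-|x|); next, flush lanes where |x| exceeds the denormal cutoff to zero.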
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 5,892 | 38.550336 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-aarch64-neonfp16arith-rr2-p2-div-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__aarch64_neonfp16arith_rr2_p2_div_x24(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
float16x8_t vf0 = vdivq_f16(ve0, vd0);
float16x8_t vf1 = vdivq_f16(ve1, vd1);
float16x8_t vf2 = vdivq_f16(ve2, vd2);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 6,827 | 40.381818 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-aarch64-neonfp16arith-rr2-p2-div-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__aarch64_neonfp16arith_rr2_p2_div_x32(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
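  // The kernel computes sigmoid(x) = 1 / (1 + exp(-x)) on z = |x|:
  //   n = z * (-log2(e)) + magic_bias   rounds -z / ln(2) and exposes 2**n in the fp16 exponent (vs),
  //   t = z + n * ln(2)                 extended-precision (rr2) reduction via ln2_hi + ln2_lo,
  //   e = exp(-z) ~= vs * (1 + t * (c1 + t * c2)),
  //   f = e / (e + 1)                   flushed to 0 once |x| exceeds the denormal cutoff,
  // and the result is reflected as 1 - f for non-negative inputs.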
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
float16x8_t vf0 = vdivq_f16(ve0, vd0);
float16x8_t vf1 = vdivq_f16(ve1, vd1);
float16x8_t vf2 = vdivq_f16(ve2, vd2);
float16x8_t vf3 = vdivq_f16(ve3, vd3);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
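    // Store the remaining 1..7 halfwords 4, 2, and 1 lanes at a time.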
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 7,762 | 41.889503 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-aarch64-neonfp16arith-rr2-p2-div-x40.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__aarch64_neonfp16arith_rr2_p2_div_x40(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
const float16x8_t vz4 = vabsq_f16(vx4);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t ve4 = vfmaq_f16(vs4, vp4, vt4);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
const float16x8_t vd4 = vaddq_f16(ve4, vone);
float16x8_t vf0 = vdivq_f16(ve0, vd0);
float16x8_t vf1 = vdivq_f16(ve1, vd1);
float16x8_t vf2 = vdivq_f16(ve2, vd2);
float16x8_t vf3 = vdivq_f16(ve3, vd3);
float16x8_t vf4 = vdivq_f16(ve4, vd4);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vcagtq_f16(vx4, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm4 = vcltq_f16(vx4, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vf4 = vbslq_f16(vm4, vf4, vsubq_f16(vone, vf4));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 8,697 | 43.152284 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-aarch64-neonfp16arith-rr2-p2-div-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__aarch64_neonfp16arith_rr2_p2_div_x48(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
const float16x8_t vz4 = vabsq_f16(vx4);
const float16x8_t vz5 = vabsq_f16(vx5);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2_hi);
float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t ve4 = vfmaq_f16(vs4, vp4, vt4);
const float16x8_t ve5 = vfmaq_f16(vs5, vp5, vt5);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
const float16x8_t vd4 = vaddq_f16(ve4, vone);
const float16x8_t vd5 = vaddq_f16(ve5, vone);
float16x8_t vf0 = vdivq_f16(ve0, vd0);
float16x8_t vf1 = vdivq_f16(ve1, vd1);
float16x8_t vf2 = vdivq_f16(ve2, vd2);
float16x8_t vf3 = vdivq_f16(ve3, vd3);
float16x8_t vf4 = vdivq_f16(ve4, vd4);
float16x8_t vf5 = vdivq_f16(ve5, vd5);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vcagtq_f16(vx4, vdenorm_cutoff)));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vcagtq_f16(vx5, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm4 = vcltq_f16(vx4, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm5 = vcltq_f16(vx5, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vf4 = vbslq_f16(vm4, vf4, vsubq_f16(vone, vf4));
vf5 = vbslq_f16(vm5, vf5, vsubq_f16(vone, vf5));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 9,632 | 44.225352 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-aarch64-neonfp16arith-rr2-p2-div-x56.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__aarch64_neonfp16arith_rr2_p2_div_x56(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 56 * sizeof(uint16_t); batch -= 56 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
const float16x8_t vz4 = vabsq_f16(vx4);
const float16x8_t vz5 = vabsq_f16(vx5);
const float16x8_t vz6 = vabsq_f16(vx6);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2_hi);
float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2_hi);
float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t ve4 = vfmaq_f16(vs4, vp4, vt4);
const float16x8_t ve5 = vfmaq_f16(vs5, vp5, vt5);
const float16x8_t ve6 = vfmaq_f16(vs6, vp6, vt6);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
const float16x8_t vd4 = vaddq_f16(ve4, vone);
const float16x8_t vd5 = vaddq_f16(ve5, vone);
const float16x8_t vd6 = vaddq_f16(ve6, vone);
float16x8_t vf0 = vdivq_f16(ve0, vd0);
float16x8_t vf1 = vdivq_f16(ve1, vd1);
float16x8_t vf2 = vdivq_f16(ve2, vd2);
float16x8_t vf3 = vdivq_f16(ve3, vd3);
float16x8_t vf4 = vdivq_f16(ve4, vd4);
float16x8_t vf5 = vdivq_f16(ve5, vd5);
float16x8_t vf6 = vdivq_f16(ve6, vd6);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vcagtq_f16(vx4, vdenorm_cutoff)));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vcagtq_f16(vx5, vdenorm_cutoff)));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vcagtq_f16(vx6, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm4 = vcltq_f16(vx4, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm5 = vcltq_f16(vx5, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm6 = vcltq_f16(vx6, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vf4 = vbslq_f16(vm4, vf4, vsubq_f16(vone, vf4));
vf5 = vbslq_f16(vm5, vf5, vsubq_f16(vone, vf5));
vf6 = vbslq_f16(vm6, vf6, vsubq_f16(vone, vf6));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 10,567 | 45.148472 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-aarch64-neonfp16arith-rr2-p2-div-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__aarch64_neonfp16arith_rr2_p2_div_x64(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
const float16x8_t vz4 = vabsq_f16(vx4);
const float16x8_t vz5 = vabsq_f16(vx5);
const float16x8_t vz6 = vabsq_f16(vx6);
const float16x8_t vz7 = vabsq_f16(vx7);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2_hi);
float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2_hi);
float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2_hi);
float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t ve4 = vfmaq_f16(vs4, vp4, vt4);
const float16x8_t ve5 = vfmaq_f16(vs5, vp5, vt5);
const float16x8_t ve6 = vfmaq_f16(vs6, vp6, vt6);
const float16x8_t ve7 = vfmaq_f16(vs7, vp7, vt7);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
const float16x8_t vd4 = vaddq_f16(ve4, vone);
const float16x8_t vd5 = vaddq_f16(ve5, vone);
const float16x8_t vd6 = vaddq_f16(ve6, vone);
const float16x8_t vd7 = vaddq_f16(ve7, vone);
float16x8_t vf0 = vdivq_f16(ve0, vd0);
float16x8_t vf1 = vdivq_f16(ve1, vd1);
float16x8_t vf2 = vdivq_f16(ve2, vd2);
float16x8_t vf3 = vdivq_f16(ve3, vd3);
float16x8_t vf4 = vdivq_f16(ve4, vd4);
float16x8_t vf5 = vdivq_f16(ve5, vd5);
float16x8_t vf6 = vdivq_f16(ve6, vd6);
float16x8_t vf7 = vdivq_f16(ve7, vd7);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vcagtq_f16(vx4, vdenorm_cutoff)));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vcagtq_f16(vx5, vdenorm_cutoff)));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vcagtq_f16(vx6, vdenorm_cutoff)));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vcagtq_f16(vx7, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm4 = vcltq_f16(vx4, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm5 = vcltq_f16(vx5, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm6 = vcltq_f16(vx6, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm7 = vcltq_f16(vx7, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vf4 = vbslq_f16(vm4, vf4, vsubq_f16(vone, vf4));
vf5 = vbslq_f16(vm5, vf5, vsubq_f16(vone, vf5));
vf6 = vbslq_f16(vm6, vf6, vsubq_f16(vone, vf6));
vf7 = vbslq_f16(vm7, vf7, vsubq_f16(vone, vf7));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 11,502 | 45.95102 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-aarch64-neonfp16arith-rr2-p2-div-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__aarch64_neonfp16arith_rr2_p2_div_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vf = vdivq_f16(ve, vd);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 3,927 | 38.28 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-div-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_div_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
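  // Unlike the NEON fp16 kernels, this variant widens each half-precision lane to fp32 and
  // computes sigmoid(x) = 1 / (1 + exp(z)) with z = -|x| (OR with the sign mask) entirely in
  // single precision: n = z * log2(e) + magic_bias exposes 2**n in vs, t = z - n * ln(2) is a
  // single-step (rr1) reduction, exp(z) ~= vs * (1 + t * (c1 + t * c2)), the quotient e / (e + 1)
  // is flushed to 0 below the denormal cutoff, and 1 - f is selected for non-negative x before
  // rounding back to fp16.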
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
i += 16;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
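    // Convert back to fp16 and store the remaining 1..7 halfwords 4, 2, and 1 lanes at a time.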
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 5,414 | 35.1 | 92 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-div-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_div_x24(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
i += 24;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
o += 24;
}
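  // Handle remaining full vectors of 8 elements one at a time.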
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 6,277 | 37.280488 | 92 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-div-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_div_x32(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
i += 32;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
o += 32;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 7,140 | 39.117978 | 92 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-div-x40.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_div_x40(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
i += 40;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
o += 40;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 8,003 | 40.6875 | 92 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-div-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_div_x48(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
i += 48;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
__m256 vf5 = _mm256_div_ps(ve5, vd5);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
o += 48;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 8,866 | 42.043689 | 92 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-div-x56.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_div_x56(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 56 * sizeof(uint16_t); batch -= 56 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vx6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
i += 56;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
__m256 vf5 = _mm256_div_ps(ve5, vd5);
__m256 vf6 = _mm256_div_ps(ve6, vd6);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
o += 56;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 9,729 | 43.227273 | 92 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-div-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_div_x64(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
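  // Main loop: 64 half-precision elements (8 AVX2 vectors) per iteration; the
  // wide unroll keeps several independent divisions in flight, which helps
  // hide the latency of VDIVPS.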
for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vx6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
const __m256 vx7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56)));
i += 64;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
const __m256 vp7 = _mm256_fmadd_ps(vc2, vt7, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 ve7 = _mm256_fmadd_ps(vt7, vp7, vs7);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
__m256 vf5 = _mm256_div_ps(ve5, vd5);
__m256 vf6 = _mm256_div_ps(ve6, vd6);
__m256 vf7 = _mm256_div_ps(ve7, vd7);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vf7, _MM_FROUND_TO_NEAREST_INT));
o += 64;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 10,592 | 44.269231 | 92 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-div-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_div_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,585 | 33.152381 | 90 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-rcp-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_rcp_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
i += 16;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
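    // The "rcp" variant replaces the exact division of the "div" kernels with
    // _mm256_rcp_ps, whose relative error is at most about 1.5*2^-12; this is
    // generally accurate enough once the result is rounded to half precision.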
const __m256 vr0 = _mm256_rcp_ps(vd0);
const __m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 5,583 | 35.025806 | 92 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-rcp-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_rcp_x24(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
i += 24;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
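// Approximate 1/d with the low-precision RCP instruction; its accuracy suffices for fp16 outputs.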
const __m256 vr0 = _mm256_rcp_ps(vd0);
const __m256 vr1 = _mm256_rcp_ps(vd1);
const __m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
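// Inputs far in the negative tail underflow fp16; flush those outputs to zero.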
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
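// For positive x reconstruct sigmoid(x) = 1 - sigmoid(-x); blendv keys off the sign bit of vx.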
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
o += 24;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 6,489 | 37.176471 | 92 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-rcp-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_rcp_x32(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
i += 32;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vr0 = _mm256_rcp_ps(vd0);
const __m256 vr1 = _mm256_rcp_ps(vd1);
const __m256 vr2 = _mm256_rcp_ps(vd2);
const __m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
o += 32;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 7,395 | 38.978378 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-rcp-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_rcp_x40(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
i += 40;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vr0 = _mm256_rcp_ps(vd0);
const __m256 vr1 = _mm256_rcp_ps(vd1);
const __m256 vr2 = _mm256_rcp_ps(vd2);
const __m256 vr3 = _mm256_rcp_ps(vd3);
const __m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
o += 40;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 8,301 | 40.51 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-rcp-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_rcp_x48(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
i += 48;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vr0 = _mm256_rcp_ps(vd0);
const __m256 vr1 = _mm256_rcp_ps(vd1);
const __m256 vr2 = _mm256_rcp_ps(vd2);
const __m256 vr3 = _mm256_rcp_ps(vd3);
const __m256 vr4 = _mm256_rcp_ps(vd4);
const __m256 vr5 = _mm256_rcp_ps(vd5);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
o += 48;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 9,207 | 41.827907 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-rcp-x56.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_rcp_x56(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 56 * sizeof(uint16_t); batch -= 56 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vx6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
i += 56;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vr0 = _mm256_rcp_ps(vd0);
const __m256 vr1 = _mm256_rcp_ps(vd1);
const __m256 vr2 = _mm256_rcp_ps(vd2);
const __m256 vr3 = _mm256_rcp_ps(vd3);
const __m256 vr4 = _mm256_rcp_ps(vd4);
const __m256 vr5 = _mm256_rcp_ps(vd5);
const __m256 vr6 = _mm256_rcp_ps(vd6);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
__m256 vf6 = _mm256_mul_ps(ve6, vr6);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
o += 56;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 10,113 | 42.973913 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-rcp-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_rcp_x64(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vx6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
const __m256 vx7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56)));
i += 64;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
const __m256 vp7 = _mm256_fmadd_ps(vc2, vt7, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 ve7 = _mm256_fmadd_ps(vt7, vp7, vs7);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
const __m256 vr0 = _mm256_rcp_ps(vd0);
const __m256 vr1 = _mm256_rcp_ps(vd1);
const __m256 vr2 = _mm256_rcp_ps(vd2);
const __m256 vr3 = _mm256_rcp_ps(vd3);
const __m256 vr4 = _mm256_rcp_ps(vd4);
const __m256 vr5 = _mm256_rcp_ps(vd5);
const __m256 vr6 = _mm256_rcp_ps(vd6);
const __m256 vr7 = _mm256_rcp_ps(vd7);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
__m256 vf6 = _mm256_mul_ps(ve6, vr6);
__m256 vf7 = _mm256_mul_ps(ve7, vr7);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vf7, _MM_FROUND_TO_NEAREST_INT));
o += 64;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 11,019 | 43.979592 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-avx2-rr1-p2-rcp-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_rcp_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
const __m256 vr = _mm256_rcp_ps(vd);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,667 | 33.280374 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1fma-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1fma_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
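// Algorithm sketch (rr2_p2_nr1fma variant): work entirely in fp16, evaluate exp(-|x|)
// with a two-term (ln2_hi + ln2_lo) range reduction and a degree-2 polynomial, divide
// by 1 + exp(-|x|) using vrecpeq_f16 refined by one Newton-Raphson FMA step, and
// select f or 1 - f based on the sign of x.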
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
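// Refine the reciprocal estimates from vrecpeq_f16 with one Newton-Raphson step: r <- r + r * (1 - r*d).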
float16x8_t vr0 = vrecpeq_f16(vd0);
float16x8_t vr1 = vrecpeq_f16(vd1);
const float16x8_t vadj0 = vfmsq_f16(vone, vr0, vd0);
const float16x8_t vadj1 = vfmsq_f16(vone, vr1, vd1);
vr0 = vfmaq_f16(vr0, vr0, vadj0);
vr1 = vfmaq_f16(vr1, vr1, vadj1);
float16x8_t vf0 = vmulq_f16(ve0, vr0);
float16x8_t vf1 = vmulq_f16(ve1, vr1);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 6,414 | 37.644578 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1fma-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1fma_x24(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
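  // Sketch of the per-vector computation below (reader annotation, not emitted by
  // the generator): sigmoid(x) is evaluated as exp(-|x|) / (1 + exp(-|x|)):
  //   vn = magic_bias + |x| * (-log2(e))   -- rounds -|x|/ln(2) to an integer n
  //   vs = 2**n, rebuilt by shifting n's low bits into the fp16 exponent field
  //   vt = |x| + n*ln2_hi + n*ln2_lo       -- reduced argument (ln(2) split in two)
  //   ve = vs + (c1 + c2*vt) * (vt*vs)     -- degree-2 polynomial, ~= exp(-|x|)
  //   vd = ve + 1; vr ~= 1/vd from vrecpe plus one Newton-Raphson step
  //   vf = ve * vr, flushed to 0 when |x| > denorm_cutoff, and reflected to
  //        1 - vf when x >= 0.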
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
float16x8_t vr0 = vrecpeq_f16(vd0);
float16x8_t vr1 = vrecpeq_f16(vd1);
float16x8_t vr2 = vrecpeq_f16(vd2);
const float16x8_t vadj0 = vfmsq_f16(vone, vr0, vd0);
const float16x8_t vadj1 = vfmsq_f16(vone, vr1, vd1);
const float16x8_t vadj2 = vfmsq_f16(vone, vr2, vd2);
vr0 = vfmaq_f16(vr0, vr0, vadj0);
vr1 = vfmaq_f16(vr1, vr1, vadj1);
vr2 = vfmaq_f16(vr2, vr2, vadj2);
float16x8_t vf0 = vmulq_f16(ve0, vr0);
float16x8_t vf1 = vmulq_f16(ve1, vr1);
float16x8_t vf2 = vmulq_f16(ve2, vr2);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 7,484 | 39.459459 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1fma-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1fma_x32(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
float16x8_t vr0 = vrecpeq_f16(vd0);
float16x8_t vr1 = vrecpeq_f16(vd1);
float16x8_t vr2 = vrecpeq_f16(vd2);
float16x8_t vr3 = vrecpeq_f16(vd3);
const float16x8_t vadj0 = vfmsq_f16(vone, vr0, vd0);
const float16x8_t vadj1 = vfmsq_f16(vone, vr1, vd1);
const float16x8_t vadj2 = vfmsq_f16(vone, vr2, vd2);
const float16x8_t vadj3 = vfmsq_f16(vone, vr3, vd3);
vr0 = vfmaq_f16(vr0, vr0, vadj0);
vr1 = vfmaq_f16(vr1, vr1, vadj1);
vr2 = vfmaq_f16(vr2, vr2, vadj2);
vr3 = vfmaq_f16(vr3, vr3, vadj3);
float16x8_t vf0 = vmulq_f16(ve0, vr0);
float16x8_t vf1 = vmulq_f16(ve1, vr1);
float16x8_t vf2 = vmulq_f16(ve2, vr2);
float16x8_t vf3 = vmulq_f16(ve3, vr3);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 8,554 | 40.936275 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1fma-x40.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1fma_x40(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
const float16x8_t vz4 = vabsq_f16(vx4);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t ve4 = vfmaq_f16(vs4, vp4, vt4);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
const float16x8_t vd4 = vaddq_f16(ve4, vone);
float16x8_t vr0 = vrecpeq_f16(vd0);
float16x8_t vr1 = vrecpeq_f16(vd1);
float16x8_t vr2 = vrecpeq_f16(vd2);
float16x8_t vr3 = vrecpeq_f16(vd3);
float16x8_t vr4 = vrecpeq_f16(vd4);
const float16x8_t vadj0 = vfmsq_f16(vone, vr0, vd0);
const float16x8_t vadj1 = vfmsq_f16(vone, vr1, vd1);
const float16x8_t vadj2 = vfmsq_f16(vone, vr2, vd2);
const float16x8_t vadj3 = vfmsq_f16(vone, vr3, vd3);
const float16x8_t vadj4 = vfmsq_f16(vone, vr4, vd4);
vr0 = vfmaq_f16(vr0, vr0, vadj0);
vr1 = vfmaq_f16(vr1, vr1, vadj1);
vr2 = vfmaq_f16(vr2, vr2, vadj2);
vr3 = vfmaq_f16(vr3, vr3, vadj3);
vr4 = vfmaq_f16(vr4, vr4, vadj4);
float16x8_t vf0 = vmulq_f16(ve0, vr0);
float16x8_t vf1 = vmulq_f16(ve1, vr1);
float16x8_t vf2 = vmulq_f16(ve2, vr2);
float16x8_t vf3 = vmulq_f16(ve3, vr3);
float16x8_t vf4 = vmulq_f16(ve4, vr4);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vcagtq_f16(vx4, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm4 = vcltq_f16(vx4, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vf4 = vbslq_f16(vm4, vf4, vsubq_f16(vone, vf4));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 9,624 | 42.161435 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1fma-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1fma_x48(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
const float16x8_t vz4 = vabsq_f16(vx4);
const float16x8_t vz5 = vabsq_f16(vx5);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2_hi);
float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t ve4 = vfmaq_f16(vs4, vp4, vt4);
const float16x8_t ve5 = vfmaq_f16(vs5, vp5, vt5);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
const float16x8_t vd4 = vaddq_f16(ve4, vone);
const float16x8_t vd5 = vaddq_f16(ve5, vone);
float16x8_t vr0 = vrecpeq_f16(vd0);
float16x8_t vr1 = vrecpeq_f16(vd1);
float16x8_t vr2 = vrecpeq_f16(vd2);
float16x8_t vr3 = vrecpeq_f16(vd3);
float16x8_t vr4 = vrecpeq_f16(vd4);
float16x8_t vr5 = vrecpeq_f16(vd5);
const float16x8_t vadj0 = vfmsq_f16(vone, vr0, vd0);
const float16x8_t vadj1 = vfmsq_f16(vone, vr1, vd1);
const float16x8_t vadj2 = vfmsq_f16(vone, vr2, vd2);
const float16x8_t vadj3 = vfmsq_f16(vone, vr3, vd3);
const float16x8_t vadj4 = vfmsq_f16(vone, vr4, vd4);
const float16x8_t vadj5 = vfmsq_f16(vone, vr5, vd5);
vr0 = vfmaq_f16(vr0, vr0, vadj0);
vr1 = vfmaq_f16(vr1, vr1, vadj1);
vr2 = vfmaq_f16(vr2, vr2, vadj2);
vr3 = vfmaq_f16(vr3, vr3, vadj3);
vr4 = vfmaq_f16(vr4, vr4, vadj4);
vr5 = vfmaq_f16(vr5, vr5, vadj5);
float16x8_t vf0 = vmulq_f16(ve0, vr0);
float16x8_t vf1 = vmulq_f16(ve1, vr1);
float16x8_t vf2 = vmulq_f16(ve2, vr2);
float16x8_t vf3 = vmulq_f16(ve3, vr3);
float16x8_t vf4 = vmulq_f16(ve4, vr4);
float16x8_t vf5 = vmulq_f16(ve5, vr5);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vcagtq_f16(vx4, vdenorm_cutoff)));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vcagtq_f16(vx5, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm4 = vcltq_f16(vx4, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm5 = vcltq_f16(vx5, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vf4 = vbslq_f16(vm4, vf4, vsubq_f16(vone, vf4));
vf5 = vbslq_f16(vm5, vf5, vsubq_f16(vone, vf5));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 10,694 | 43.194215 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1fma-x56.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1fma_x56(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 56 * sizeof(uint16_t); batch -= 56 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
const float16x8_t vz4 = vabsq_f16(vx4);
const float16x8_t vz5 = vabsq_f16(vx5);
const float16x8_t vz6 = vabsq_f16(vx6);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2_hi);
float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2_hi);
float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t ve4 = vfmaq_f16(vs4, vp4, vt4);
const float16x8_t ve5 = vfmaq_f16(vs5, vp5, vt5);
const float16x8_t ve6 = vfmaq_f16(vs6, vp6, vt6);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
const float16x8_t vd4 = vaddq_f16(ve4, vone);
const float16x8_t vd5 = vaddq_f16(ve5, vone);
const float16x8_t vd6 = vaddq_f16(ve6, vone);
float16x8_t vr0 = vrecpeq_f16(vd0);
float16x8_t vr1 = vrecpeq_f16(vd1);
float16x8_t vr2 = vrecpeq_f16(vd2);
float16x8_t vr3 = vrecpeq_f16(vd3);
float16x8_t vr4 = vrecpeq_f16(vd4);
float16x8_t vr5 = vrecpeq_f16(vd5);
float16x8_t vr6 = vrecpeq_f16(vd6);
const float16x8_t vadj0 = vfmsq_f16(vone, vr0, vd0);
const float16x8_t vadj1 = vfmsq_f16(vone, vr1, vd1);
const float16x8_t vadj2 = vfmsq_f16(vone, vr2, vd2);
const float16x8_t vadj3 = vfmsq_f16(vone, vr3, vd3);
const float16x8_t vadj4 = vfmsq_f16(vone, vr4, vd4);
const float16x8_t vadj5 = vfmsq_f16(vone, vr5, vd5);
const float16x8_t vadj6 = vfmsq_f16(vone, vr6, vd6);
vr0 = vfmaq_f16(vr0, vr0, vadj0);
vr1 = vfmaq_f16(vr1, vr1, vadj1);
vr2 = vfmaq_f16(vr2, vr2, vadj2);
vr3 = vfmaq_f16(vr3, vr3, vadj3);
vr4 = vfmaq_f16(vr4, vr4, vadj4);
vr5 = vfmaq_f16(vr5, vr5, vadj5);
vr6 = vfmaq_f16(vr6, vr6, vadj6);
float16x8_t vf0 = vmulq_f16(ve0, vr0);
float16x8_t vf1 = vmulq_f16(ve1, vr1);
float16x8_t vf2 = vmulq_f16(ve2, vr2);
float16x8_t vf3 = vmulq_f16(ve3, vr3);
float16x8_t vf4 = vmulq_f16(ve4, vr4);
float16x8_t vf5 = vmulq_f16(ve5, vr5);
float16x8_t vf6 = vmulq_f16(ve6, vr6);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vcagtq_f16(vx4, vdenorm_cutoff)));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vcagtq_f16(vx5, vdenorm_cutoff)));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vcagtq_f16(vx6, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm4 = vcltq_f16(vx4, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm5 = vcltq_f16(vx5, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm6 = vcltq_f16(vx6, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vf4 = vbslq_f16(vm4, vf4, vsubq_f16(vone, vf4));
vf5 = vbslq_f16(vm5, vf5, vsubq_f16(vone, vf5));
vf6 = vbslq_f16(vm6, vf6, vsubq_f16(vone, vf6));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 11,764 | 44.076628 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1fma-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1fma_x64(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
const float16x8_t vz4 = vabsq_f16(vx4);
const float16x8_t vz5 = vabsq_f16(vx5);
const float16x8_t vz6 = vabsq_f16(vx6);
const float16x8_t vz7 = vabsq_f16(vx7);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2_hi);
float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2_hi);
float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2_hi);
float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t ve4 = vfmaq_f16(vs4, vp4, vt4);
const float16x8_t ve5 = vfmaq_f16(vs5, vp5, vt5);
const float16x8_t ve6 = vfmaq_f16(vs6, vp6, vt6);
const float16x8_t ve7 = vfmaq_f16(vs7, vp7, vt7);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
const float16x8_t vd4 = vaddq_f16(ve4, vone);
const float16x8_t vd5 = vaddq_f16(ve5, vone);
const float16x8_t vd6 = vaddq_f16(ve6, vone);
const float16x8_t vd7 = vaddq_f16(ve7, vone);
float16x8_t vr0 = vrecpeq_f16(vd0);
float16x8_t vr1 = vrecpeq_f16(vd1);
float16x8_t vr2 = vrecpeq_f16(vd2);
float16x8_t vr3 = vrecpeq_f16(vd3);
float16x8_t vr4 = vrecpeq_f16(vd4);
float16x8_t vr5 = vrecpeq_f16(vd5);
float16x8_t vr6 = vrecpeq_f16(vd6);
float16x8_t vr7 = vrecpeq_f16(vd7);
const float16x8_t vadj0 = vfmsq_f16(vone, vr0, vd0);
const float16x8_t vadj1 = vfmsq_f16(vone, vr1, vd1);
const float16x8_t vadj2 = vfmsq_f16(vone, vr2, vd2);
const float16x8_t vadj3 = vfmsq_f16(vone, vr3, vd3);
const float16x8_t vadj4 = vfmsq_f16(vone, vr4, vd4);
const float16x8_t vadj5 = vfmsq_f16(vone, vr5, vd5);
const float16x8_t vadj6 = vfmsq_f16(vone, vr6, vd6);
const float16x8_t vadj7 = vfmsq_f16(vone, vr7, vd7);
vr0 = vfmaq_f16(vr0, vr0, vadj0);
vr1 = vfmaq_f16(vr1, vr1, vadj1);
vr2 = vfmaq_f16(vr2, vr2, vadj2);
vr3 = vfmaq_f16(vr3, vr3, vadj3);
vr4 = vfmaq_f16(vr4, vr4, vadj4);
vr5 = vfmaq_f16(vr5, vr5, vadj5);
vr6 = vfmaq_f16(vr6, vr6, vadj6);
vr7 = vfmaq_f16(vr7, vr7, vadj7);
float16x8_t vf0 = vmulq_f16(ve0, vr0);
float16x8_t vf1 = vmulq_f16(ve1, vr1);
float16x8_t vf2 = vmulq_f16(ve2, vr2);
float16x8_t vf3 = vmulq_f16(ve3, vr3);
float16x8_t vf4 = vmulq_f16(ve4, vr4);
float16x8_t vf5 = vmulq_f16(ve5, vr5);
float16x8_t vf6 = vmulq_f16(ve6, vr6);
float16x8_t vf7 = vmulq_f16(ve7, vr7);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vcagtq_f16(vx4, vdenorm_cutoff)));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vcagtq_f16(vx5, vdenorm_cutoff)));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vcagtq_f16(vx6, vdenorm_cutoff)));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vcagtq_f16(vx7, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm4 = vcltq_f16(vx4, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm5 = vcltq_f16(vx5, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm6 = vcltq_f16(vx6, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm7 = vcltq_f16(vx7, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vf4 = vbslq_f16(vm4, vf4, vsubq_f16(vone, vf4));
vf5 = vbslq_f16(vm5, vf5, vsubq_f16(vone, vf5));
vf6 = vbslq_f16(vm6, vf6, vsubq_f16(vone, vf6));
vf7 = vbslq_f16(vm7, vf7, vsubq_f16(vone, vf7));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 12,834 | 44.839286 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1fma-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1fma_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
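  // Outline of the computation below (rr2_p2, nr1fma variant):
  //   z = |x|
  //   n = round(-z * log2(e))                  via the magic-bias trick
  //   s = 2**n                                 reconstructed by shifting the fp16 bits of n left by 10
  //   t = z + n*ln2_hi + n*ln2_lo              two-word ("rr2") argument reduction
  //   e = s + (c1 + c2*t)*(t*s) ~= exp(-z)     degree-2 ("p2") polynomial
  //   d = e + 1;  r ~= 1/d                     VRECPE estimate + one fused (FMA) Newton-Raphson step
  //   f = e*r ~= sigmoid(-z), flushed to 0 when z exceeds denorm_cutoff
  //   output: f if x < 0, otherwise 1 - f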
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
vr = vfmaq_f16(vr, vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1recps-x16.c
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1recps_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
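  // Same rr2_p2 sigmoid computation as the nr1fma variant above; the only difference
  // is that the reciprocal of (1 + exp(-|x|)) is refined with one VRECPS
  // Newton-Raphson step (vr *= vrecps(vr, vd)) instead of an FMA-based step.
  // The main loop is unrolled to 16 half-precision elements per iteration.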
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
float16x8_t vr0 = vrecpeq_f16(vd0);
float16x8_t vr1 = vrecpeq_f16(vd1);
const float16x8_t vadj0 = vrecpsq_f16(vr0, vd0);
const float16x8_t vadj1 = vrecpsq_f16(vr1, vd1);
vr0 = vmulq_f16(vr0, vadj0);
vr1 = vmulq_f16(vr1, vadj1);
float16x8_t vf0 = vmulq_f16(ve0, vr0);
float16x8_t vf1 = vmulq_f16(ve1, vr1);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1recps-x24.c
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1recps_x24(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
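  // Same rr2_p2 / nr1recps sigmoid computation; the main loop is unrolled to
  // 24 half-precision elements (three 8-element vectors) per iteration.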
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
float16x8_t vr0 = vrecpeq_f16(vd0);
float16x8_t vr1 = vrecpeq_f16(vd1);
float16x8_t vr2 = vrecpeq_f16(vd2);
const float16x8_t vadj0 = vrecpsq_f16(vr0, vd0);
const float16x8_t vadj1 = vrecpsq_f16(vr1, vd1);
const float16x8_t vadj2 = vrecpsq_f16(vr2, vd2);
vr0 = vmulq_f16(vr0, vadj0);
vr1 = vmulq_f16(vr1, vadj1);
vr2 = vmulq_f16(vr2, vadj2);
float16x8_t vf0 = vmulq_f16(ve0, vr0);
float16x8_t vf1 = vmulq_f16(ve1, vr1);
float16x8_t vf2 = vmulq_f16(ve2, vr2);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1recps-x32.c
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1recps_x32(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
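  // Same rr2_p2 / nr1recps sigmoid computation; the main loop is unrolled to
  // 32 half-precision elements (four 8-element vectors) per iteration.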
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
float16x8_t vr0 = vrecpeq_f16(vd0);
float16x8_t vr1 = vrecpeq_f16(vd1);
float16x8_t vr2 = vrecpeq_f16(vd2);
float16x8_t vr3 = vrecpeq_f16(vd3);
const float16x8_t vadj0 = vrecpsq_f16(vr0, vd0);
const float16x8_t vadj1 = vrecpsq_f16(vr1, vd1);
const float16x8_t vadj2 = vrecpsq_f16(vr2, vd2);
const float16x8_t vadj3 = vrecpsq_f16(vr3, vd3);
vr0 = vmulq_f16(vr0, vadj0);
vr1 = vmulq_f16(vr1, vadj1);
vr2 = vmulq_f16(vr2, vadj2);
vr3 = vmulq_f16(vr3, vadj3);
float16x8_t vf0 = vmulq_f16(ve0, vr0);
float16x8_t vf1 = vmulq_f16(ve1, vr1);
float16x8_t vf2 = vmulq_f16(ve2, vr2);
float16x8_t vf3 = vmulq_f16(ve3, vr3);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1recps-x40.c
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1recps_x40(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
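  // Same rr2_p2 / nr1recps sigmoid computation; the main loop is unrolled to
  // 40 half-precision elements (five 8-element vectors) per iteration.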
for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
const float16x8_t vz4 = vabsq_f16(vx4);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t ve4 = vfmaq_f16(vs4, vp4, vt4);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
const float16x8_t vd4 = vaddq_f16(ve4, vone);
float16x8_t vr0 = vrecpeq_f16(vd0);
float16x8_t vr1 = vrecpeq_f16(vd1);
float16x8_t vr2 = vrecpeq_f16(vd2);
float16x8_t vr3 = vrecpeq_f16(vd3);
float16x8_t vr4 = vrecpeq_f16(vd4);
const float16x8_t vadj0 = vrecpsq_f16(vr0, vd0);
const float16x8_t vadj1 = vrecpsq_f16(vr1, vd1);
const float16x8_t vadj2 = vrecpsq_f16(vr2, vd2);
const float16x8_t vadj3 = vrecpsq_f16(vr3, vd3);
const float16x8_t vadj4 = vrecpsq_f16(vr4, vd4);
vr0 = vmulq_f16(vr0, vadj0);
vr1 = vmulq_f16(vr1, vadj1);
vr2 = vmulq_f16(vr2, vadj2);
vr3 = vmulq_f16(vr3, vadj3);
vr4 = vmulq_f16(vr4, vadj4);
float16x8_t vf0 = vmulq_f16(ve0, vr0);
float16x8_t vf1 = vmulq_f16(ve1, vr1);
float16x8_t vf2 = vmulq_f16(ve2, vr2);
float16x8_t vf3 = vmulq_f16(ve3, vr3);
float16x8_t vf4 = vmulq_f16(ve4, vr4);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vcagtq_f16(vx4, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm4 = vcltq_f16(vx4, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vf4 = vbslq_f16(vm4, vf4, vsubq_f16(vone, vf4));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1recps-x48.c
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1recps_x48(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
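  // Same rr2_p2 / nr1recps sigmoid computation; the main loop is unrolled to
  // 48 half-precision elements (six 8-element vectors) per iteration.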
for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
const float16x8_t vz4 = vabsq_f16(vx4);
const float16x8_t vz5 = vabsq_f16(vx5);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2_hi);
float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t ve4 = vfmaq_f16(vs4, vp4, vt4);
const float16x8_t ve5 = vfmaq_f16(vs5, vp5, vt5);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
const float16x8_t vd4 = vaddq_f16(ve4, vone);
const float16x8_t vd5 = vaddq_f16(ve5, vone);
float16x8_t vr0 = vrecpeq_f16(vd0);
float16x8_t vr1 = vrecpeq_f16(vd1);
float16x8_t vr2 = vrecpeq_f16(vd2);
float16x8_t vr3 = vrecpeq_f16(vd3);
float16x8_t vr4 = vrecpeq_f16(vd4);
float16x8_t vr5 = vrecpeq_f16(vd5);
const float16x8_t vadj0 = vrecpsq_f16(vr0, vd0);
const float16x8_t vadj1 = vrecpsq_f16(vr1, vd1);
const float16x8_t vadj2 = vrecpsq_f16(vr2, vd2);
const float16x8_t vadj3 = vrecpsq_f16(vr3, vd3);
const float16x8_t vadj4 = vrecpsq_f16(vr4, vd4);
const float16x8_t vadj5 = vrecpsq_f16(vr5, vd5);
vr0 = vmulq_f16(vr0, vadj0);
vr1 = vmulq_f16(vr1, vadj1);
vr2 = vmulq_f16(vr2, vadj2);
vr3 = vmulq_f16(vr3, vadj3);
vr4 = vmulq_f16(vr4, vadj4);
vr5 = vmulq_f16(vr5, vadj5);
float16x8_t vf0 = vmulq_f16(ve0, vr0);
float16x8_t vf1 = vmulq_f16(ve1, vr1);
float16x8_t vf2 = vmulq_f16(ve2, vr2);
float16x8_t vf3 = vmulq_f16(ve3, vr3);
float16x8_t vf4 = vmulq_f16(ve4, vr4);
float16x8_t vf5 = vmulq_f16(ve5, vr5);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vcagtq_f16(vx4, vdenorm_cutoff)));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vcagtq_f16(vx5, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm4 = vcltq_f16(vx4, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm5 = vcltq_f16(vx5, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vf4 = vbslq_f16(vm4, vf4, vsubq_f16(vone, vf4));
vf5 = vbslq_f16(vm5, vf5, vsubq_f16(vone, vf5));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1recps-x56.c
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1recps_x56(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
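  // Same rr2_p2 / nr1recps sigmoid computation; the main loop is unrolled to
  // 56 half-precision elements (seven 8-element vectors) per iteration.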
for (; batch >= 56 * sizeof(uint16_t); batch -= 56 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
const float16x8_t vz4 = vabsq_f16(vx4);
const float16x8_t vz5 = vabsq_f16(vx5);
const float16x8_t vz6 = vabsq_f16(vx6);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2_hi);
float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2_hi);
float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t ve4 = vfmaq_f16(vs4, vp4, vt4);
const float16x8_t ve5 = vfmaq_f16(vs5, vp5, vt5);
const float16x8_t ve6 = vfmaq_f16(vs6, vp6, vt6);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
const float16x8_t vd4 = vaddq_f16(ve4, vone);
const float16x8_t vd5 = vaddq_f16(ve5, vone);
const float16x8_t vd6 = vaddq_f16(ve6, vone);
float16x8_t vr0 = vrecpeq_f16(vd0);
float16x8_t vr1 = vrecpeq_f16(vd1);
float16x8_t vr2 = vrecpeq_f16(vd2);
float16x8_t vr3 = vrecpeq_f16(vd3);
float16x8_t vr4 = vrecpeq_f16(vd4);
float16x8_t vr5 = vrecpeq_f16(vd5);
float16x8_t vr6 = vrecpeq_f16(vd6);
const float16x8_t vadj0 = vrecpsq_f16(vr0, vd0);
const float16x8_t vadj1 = vrecpsq_f16(vr1, vd1);
const float16x8_t vadj2 = vrecpsq_f16(vr2, vd2);
const float16x8_t vadj3 = vrecpsq_f16(vr3, vd3);
const float16x8_t vadj4 = vrecpsq_f16(vr4, vd4);
const float16x8_t vadj5 = vrecpsq_f16(vr5, vd5);
const float16x8_t vadj6 = vrecpsq_f16(vr6, vd6);
vr0 = vmulq_f16(vr0, vadj0);
vr1 = vmulq_f16(vr1, vadj1);
vr2 = vmulq_f16(vr2, vadj2);
vr3 = vmulq_f16(vr3, vadj3);
vr4 = vmulq_f16(vr4, vadj4);
vr5 = vmulq_f16(vr5, vadj5);
vr6 = vmulq_f16(vr6, vadj6);
float16x8_t vf0 = vmulq_f16(ve0, vr0);
float16x8_t vf1 = vmulq_f16(ve1, vr1);
float16x8_t vf2 = vmulq_f16(ve2, vr2);
float16x8_t vf3 = vmulq_f16(ve3, vr3);
float16x8_t vf4 = vmulq_f16(ve4, vr4);
float16x8_t vf5 = vmulq_f16(ve5, vr5);
float16x8_t vf6 = vmulq_f16(ve6, vr6);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vcagtq_f16(vx4, vdenorm_cutoff)));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vcagtq_f16(vx5, vdenorm_cutoff)));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vcagtq_f16(vx6, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm4 = vcltq_f16(vx4, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm5 = vcltq_f16(vx5, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm6 = vcltq_f16(vx6, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vf4 = vbslq_f16(vm4, vf4, vsubq_f16(vone, vf4));
vf5 = vbslq_f16(vm5, vf5, vsubq_f16(vone, vf5));
vf6 = vbslq_f16(vm6, vf6, vsubq_f16(vone, vf6));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1recps-x64.c
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1recps_x64(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_log2e));
const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_hi));
const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00))); // 1.0h
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
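  // Same rr2_p2 / nr1recps sigmoid computation; the main loop is unrolled to
  // 64 half-precision elements (eight 8-element vectors) per iteration.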
for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz0 = vabsq_f16(vx0);
const float16x8_t vz1 = vabsq_f16(vx1);
const float16x8_t vz2 = vabsq_f16(vx2);
const float16x8_t vz3 = vabsq_f16(vx3);
const float16x8_t vz4 = vabsq_f16(vx4);
const float16x8_t vz5 = vabsq_f16(vx5);
const float16x8_t vz6 = vabsq_f16(vx6);
const float16x8_t vz7 = vabsq_f16(vx7);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2_hi);
float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2_hi);
float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2_hi);
float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
const float16x8_t ve4 = vfmaq_f16(vs4, vp4, vt4);
const float16x8_t ve5 = vfmaq_f16(vs5, vp5, vt5);
const float16x8_t ve6 = vfmaq_f16(vs6, vp6, vt6);
const float16x8_t ve7 = vfmaq_f16(vs7, vp7, vt7);
const float16x8_t vd0 = vaddq_f16(ve0, vone);
const float16x8_t vd1 = vaddq_f16(ve1, vone);
const float16x8_t vd2 = vaddq_f16(ve2, vone);
const float16x8_t vd3 = vaddq_f16(ve3, vone);
const float16x8_t vd4 = vaddq_f16(ve4, vone);
const float16x8_t vd5 = vaddq_f16(ve5, vone);
const float16x8_t vd6 = vaddq_f16(ve6, vone);
const float16x8_t vd7 = vaddq_f16(ve7, vone);
float16x8_t vr0 = vrecpeq_f16(vd0);
float16x8_t vr1 = vrecpeq_f16(vd1);
float16x8_t vr2 = vrecpeq_f16(vd2);
float16x8_t vr3 = vrecpeq_f16(vd3);
float16x8_t vr4 = vrecpeq_f16(vd4);
float16x8_t vr5 = vrecpeq_f16(vd5);
float16x8_t vr6 = vrecpeq_f16(vd6);
float16x8_t vr7 = vrecpeq_f16(vd7);
const float16x8_t vadj0 = vrecpsq_f16(vr0, vd0);
const float16x8_t vadj1 = vrecpsq_f16(vr1, vd1);
const float16x8_t vadj2 = vrecpsq_f16(vr2, vd2);
const float16x8_t vadj3 = vrecpsq_f16(vr3, vd3);
const float16x8_t vadj4 = vrecpsq_f16(vr4, vd4);
const float16x8_t vadj5 = vrecpsq_f16(vr5, vd5);
const float16x8_t vadj6 = vrecpsq_f16(vr6, vd6);
const float16x8_t vadj7 = vrecpsq_f16(vr7, vd7);
vr0 = vmulq_f16(vr0, vadj0);
vr1 = vmulq_f16(vr1, vadj1);
vr2 = vmulq_f16(vr2, vadj2);
vr3 = vmulq_f16(vr3, vadj3);
vr4 = vmulq_f16(vr4, vadj4);
vr5 = vmulq_f16(vr5, vadj5);
vr6 = vmulq_f16(vr6, vadj6);
vr7 = vmulq_f16(vr7, vadj7);
float16x8_t vf0 = vmulq_f16(ve0, vr0);
float16x8_t vf1 = vmulq_f16(ve1, vr1);
float16x8_t vf2 = vmulq_f16(ve2, vr2);
float16x8_t vf3 = vmulq_f16(ve3, vr3);
float16x8_t vf4 = vmulq_f16(ve4, vr4);
float16x8_t vf5 = vmulq_f16(ve5, vr5);
float16x8_t vf6 = vmulq_f16(ve6, vr6);
float16x8_t vf7 = vmulq_f16(ve7, vr7);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vcagtq_f16(vx4, vdenorm_cutoff)));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vcagtq_f16(vx5, vdenorm_cutoff)));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vcagtq_f16(vx6, vdenorm_cutoff)));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vcagtq_f16(vx7, vdenorm_cutoff)));
const uint16x8_t vm0 = vcltq_f16(vx0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm1 = vcltq_f16(vx1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm2 = vcltq_f16(vx2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm3 = vcltq_f16(vx3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm4 = vcltq_f16(vx4, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm5 = vcltq_f16(vx5, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm6 = vcltq_f16(vx6, vreinterpretq_f16_u16(vmovq_n_u16(0)));
const uint16x8_t vm7 = vcltq_f16(vx7, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
vf4 = vbslq_f16(vm4, vf4, vsubq_f16(vone, vf4));
vf5 = vbslq_f16(vm5, vf5, vsubq_f16(vone, vf5));
vf6 = vbslq_f16(vm6, vf6, vsubq_f16(vone, vf6));
vf7 = vbslq_f16(vm7, vf7, vsubq_f16(vone, vf7));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 12,748 | 44.532143 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsigmoid/gen/f16-vsigmoid-neonfp16arith-rr2-p2-nr1recps-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsigmoid/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1recps_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_log2e));
  const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.ln2_hi));
  const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));
  const float16x8_t vone = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3C00)));  // 1.0h
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vz = vabsq_f16(vx);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
vt = vfmaq_f16(vt, vn, vln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
const float16x8_t ve = vfmaq_f16(vs, vp, vt);
const float16x8_t vd = vaddq_f16(ve, vone);
float16x8_t vr = vrecpeq_f16(vd);
const float16x8_t vadj = vrecpsq_f16(vr, vd);
vr = vmulq_f16(vr, vadj);
float16x8_t vf = vmulq_f16(ve, vr);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
const uint16x8_t vm = vcltq_f16(vx, vreinterpretq_f16_u16(vmovq_n_u16(0)));
vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
}
}
}
| 4,162 | 37.546296 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsqrt/gen/f16-vsqrt-aarch64-neonfp16arith-sqrt-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsqrt/neonfp16arith-sqrt.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsqrt_ukernel__aarch64_neonfp16arith_sqrt_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
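  // Square roots are computed directly with the native FP16 vsqrtq_f16/vsqrt_f16 instructions;
  // the tail loads a full vector (out-of-bounds reads are allowed here) and stores 4, 2, and 1
  // halfwords at a time.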
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
float16x8_t vacc0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vacc1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vsqrtq_f16(vacc0);
vacc1 = vsqrtq_f16(vacc1);
vst1q_u16(o, vreinterpretq_u16_f16(vacc0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vacc1)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vsqrtq_f16(vacc);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
float16x4_t vacc_lo = vsqrt_f16(vget_low_f16(vacc));
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vsqrt_f16(vget_high_f16(vacc));
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 2,007 | 31.387097 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsqrt/gen/f16-vsqrt-aarch64-neonfp16arith-sqrt-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsqrt/neonfp16arith-sqrt.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsqrt_ukernel__aarch64_neonfp16arith_sqrt_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vsqrtq_f16(vacc);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
float16x4_t vacc_lo = vsqrt_f16(vget_low_f16(vacc));
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vsqrt_f16(vget_high_f16(vacc));
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 1,613 | 30.038462 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsqrt/gen/f16-vsqrt-f16c-sqrt-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsqrt/f16c-sqrt.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsqrt_ukernel__f16c_sqrt_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
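  // F16C path: widen 8 half-precision values to single precision, take _mm256_sqrt_ps, and
  // narrow back to half precision with round-to-nearest.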
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
__m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
__m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
i += 16;
vacc0 = _mm256_sqrt_ps(vacc0);
vacc1 = _mm256_sqrt_ps(vacc1);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vacc1, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc = _mm256_sqrt_ps(vacc);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
vacc = _mm256_sqrt_ps(vacc);
__m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
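    // Store the remaining 1-7 halfwords: 4, 2, and 1 at a time, shifting consumed lanes out of vh.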
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
o += 4;
vh = _mm_unpackhi_epi64(vh, vh);
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
o += 2;
vh = _mm_srli_epi64(vh, 32);
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,188 | 30.271429 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsqrt/gen/f16-vsqrt-f16c-sqrt-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsqrt/f16c-sqrt.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsqrt_ukernel__f16c_sqrt_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc = _mm256_sqrt_ps(vacc);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
vacc = _mm256_sqrt_ps(vacc);
__m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
o += 4;
vh = _mm_unpackhi_epi64(vh, vh);
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
o += 2;
vh = _mm_srli_epi64(vh, 32);
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 1,678 | 27.948276 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsqrt/gen/f16-vsqrt-fp16arith-sqrt-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsqrt/fp16arith-sqrt.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsqrt_ukernel__fp16arith_sqrt_x1(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16_t* i = (const float16_t*) input;
float16_t* o = (float16_t*) output;
do {
float16_t vacc = *i++;
vacc = vsqrth_f16(vacc);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 980 | 23.525 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsqrt/gen/f16-vsqrt-fp16arith-sqrt-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsqrt/fp16arith-sqrt.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsqrt_ukernel__fp16arith_sqrt_x2(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16_t* i = (const float16_t*) input;
float16_t* o = (float16_t*) output;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
float16_t vacc0 = *i++;
float16_t vacc1 = *i++;
vacc0 = vsqrth_f16(vacc0);
vacc1 = vsqrth_f16(vacc1);
*o++ = vacc0;
*o++ = vacc1;
}
if XNN_UNLIKELY(batch != 0) {
float16_t vacc = *i;
vacc = vsqrth_f16(vacc);
*o = vacc;
}
}
| 1,184 | 23.183673 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsqrt/gen/f16-vsqrt-fp16arith-sqrt-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsqrt/fp16arith-sqrt.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsqrt_ukernel__fp16arith_sqrt_x4(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16_t* i = (const float16_t*) input;
float16_t* o = (float16_t*) output;
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
float16_t vacc0 = *i++;
float16_t vacc1 = *i++;
float16_t vacc2 = *i++;
float16_t vacc3 = *i++;
vacc0 = vsqrth_f16(vacc0);
vacc1 = vsqrth_f16(vacc1);
vacc2 = vsqrth_f16(vacc2);
vacc3 = vsqrth_f16(vacc3);
*o++ = vacc0;
*o++ = vacc1;
*o++ = vacc2;
*o++ = vacc3;
}
if XNN_UNLIKELY(batch != 0) {
do {
float16_t vacc = *i++;
vacc = vsqrth_f16(vacc);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 1,417 | 23.448276 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsqrt/gen/f16-vsqrt-neonfp16arith-nr1fma1adj-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsqrt/neonfp16arith-nr1fma1adj.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsqrt_ukernel__neonfp16arith_nr1fma1adj_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16x8_t vpositive_infinity = vmovq_n_u16(UINT16_C(0x7C00));
const float16x8_t vhalf = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3800))); // 0.5h
const uint16x8_t vexp4_mask = vmovq_n_u16(UINT16_C(0x7800));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
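  // The top four exponent bits of each input are replaced with those of 0.5h, mapping the value
  // into [0.5, 2); vpostscale keeps half of the removed (even) exponent offset and is added back
  // to the bit pattern of the result. sqrt(x) itself starts from a vrsqrteq_f16 estimate, refined
  // with one Newton-Raphson FMA step and a final adjustment step. Zero and +infinity inputs pass
  // through unchanged, while NaN and negative (non-zero) inputs produce the canonical NaN (0x7E00).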
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vbslq_f16(vexp4_mask, vhalf, vi0);
const int16x8_t vexp4i0 = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi0), vexp4_mask));
const float16x8_t vx1 = vbslq_f16(vexp4_mask, vhalf, vi1);
const int16x8_t vexp4i1 = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi1), vexp4_mask));
const float16x8_t vrsqrtx0 = vrsqrteq_f16(vx0);
const int16x8_t vpostscale0 = vhsubq_s16(vexp4i0, vreinterpretq_s16_f16(vhalf));
const float16x8_t vrsqrtx1 = vrsqrteq_f16(vx1);
const int16x8_t vpostscale1 = vhsubq_s16(vexp4i1, vreinterpretq_s16_f16(vhalf));
float16x8_t vsqrtx0 = vmulq_f16(vrsqrtx0, vx0);
const float16x8_t vhalfrsqrtx0 = vmulq_f16(vrsqrtx0, vhalf);
uint16x8_t vspecial_mask0 = vcgeq_u16(vreinterpretq_u16_f16(vi0), vpositive_infinity);
float16x8_t vsqrtx1 = vmulq_f16(vrsqrtx1, vx1);
const float16x8_t vhalfrsqrtx1 = vmulq_f16(vrsqrtx1, vhalf);
uint16x8_t vspecial_mask1 = vcgeq_u16(vreinterpretq_u16_f16(vi1), vpositive_infinity);
const float16x8_t vresidual0 = vfmsq_f16(vhalf, vsqrtx0, vhalfrsqrtx0);
const uint16x8_t vzero_mask0 = vceqq_f16(vi0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value0 = vmovq_n_u16(UINT16_C(0x7E00));
const float16x8_t vresidual1 = vfmsq_f16(vhalf, vsqrtx1, vhalfrsqrtx1);
const uint16x8_t vzero_mask1 = vceqq_f16(vi1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value1 = vmovq_n_u16(UINT16_C(0x7E00));
vsqrtx0 = vfmaq_f16(vsqrtx0, vresidual0, vsqrtx0);
vspecial_mask0 = vorrq_u16(vspecial_mask0, vzero_mask0);
const uint16x8_t vinfinity_mask0 = vceqq_u16(vreinterpretq_u16_f16(vi0), vpositive_infinity);
vsqrtx1 = vfmaq_f16(vsqrtx1, vresidual1, vsqrtx1);
vspecial_mask1 = vorrq_u16(vspecial_mask1, vzero_mask1);
const uint16x8_t vinfinity_mask1 = vceqq_u16(vreinterpretq_u16_f16(vi1), vpositive_infinity);
const float16x8_t vadjustment0 = vfmsq_f16(vx0, vsqrtx0, vsqrtx0);
const uint16x8_t vinput_mask0 = vorrq_u16(vinfinity_mask0, vzero_mask0);
const float16x8_t vadjustment1 = vfmsq_f16(vx1, vsqrtx1, vsqrtx1);
const uint16x8_t vinput_mask1 = vorrq_u16(vinfinity_mask1, vzero_mask1);
vsqrtx0 = vfmaq_f16(vsqrtx0, vhalfrsqrtx0, vadjustment0);
vspecial_value0 = vbslq_u16(vinput_mask0, vreinterpretq_u16_f16(vi0), vspecial_value0);
vsqrtx1 = vfmaq_f16(vsqrtx1, vhalfrsqrtx1, vadjustment1);
vspecial_value1 = vbslq_u16(vinput_mask1, vreinterpretq_u16_f16(vi1), vspecial_value1);
float16x8_t vy0 = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx0), vpostscale0));
float16x8_t vy1 = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx1), vpostscale1));
vy0 = vbslq_f16(vspecial_mask0, vreinterpretq_f16_u16(vspecial_value0), vy0);
vy1 = vbslq_f16(vspecial_mask1, vreinterpretq_f16_u16(vspecial_value1), vy1);
vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vbslq_f16(vexp4_mask, vhalf, vi);
const int16x8_t vexp4i = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi), vexp4_mask));
const float16x8_t vrsqrtx = vrsqrteq_f16(vx);
const int16x8_t vpostscale = vhsubq_s16(vexp4i, vreinterpretq_s16_f16(vhalf));
float16x8_t vsqrtx = vmulq_f16(vrsqrtx, vx);
const float16x8_t vhalfrsqrtx = vmulq_f16(vrsqrtx, vhalf);
uint16x8_t vspecial_mask = vcgeq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vresidual = vfmsq_f16(vhalf, vsqrtx, vhalfrsqrtx);
const uint16x8_t vzero_mask = vceqq_f16(vi, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value = vmovq_n_u16(UINT16_C(0x7E00));
vsqrtx = vfmaq_f16(vsqrtx, vresidual, vsqrtx);
vspecial_mask = vorrq_u16(vspecial_mask, vzero_mask);
const uint16x8_t vinfinity_mask = vceqq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vadjustment = vfmsq_f16(vx, vsqrtx, vsqrtx);
const uint16x8_t vinput_mask = vorrq_u16(vinfinity_mask, vzero_mask);
vsqrtx = vfmaq_f16(vsqrtx, vhalfrsqrtx, vadjustment);
vspecial_value = vbslq_u16(vinput_mask, vreinterpretq_u16_f16(vi), vspecial_value);
float16x8_t vy = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx), vpostscale));
vy = vbslq_f16(vspecial_mask, vreinterpretq_f16_u16(vspecial_value), vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vbslq_f16(vexp4_mask, vhalf, vi);
const int16x8_t vexp4i = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi), vexp4_mask));
const float16x8_t vrsqrtx = vrsqrteq_f16(vx);
const int16x8_t vpostscale = vhsubq_s16(vexp4i, vreinterpretq_s16_f16(vhalf));
float16x8_t vsqrtx = vmulq_f16(vrsqrtx, vx);
const float16x8_t vhalfrsqrtx = vmulq_f16(vrsqrtx, vhalf);
uint16x8_t vspecial_mask = vcgeq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vresidual = vfmsq_f16(vhalf, vsqrtx, vhalfrsqrtx);
const uint16x8_t vzero_mask = vceqq_f16(vi, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value = vmovq_n_u16(UINT16_C(0x7E00));
vsqrtx = vfmaq_f16(vsqrtx, vresidual, vsqrtx);
vspecial_mask = vorrq_u16(vspecial_mask, vzero_mask);
const uint16x8_t vinfinity_mask = vceqq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vadjustment = vfmsq_f16(vx, vsqrtx, vsqrtx);
const uint16x8_t vinput_mask = vorrq_u16(vinfinity_mask, vzero_mask);
vsqrtx = vfmaq_f16(vsqrtx, vhalfrsqrtx, vadjustment);
vspecial_value = vbslq_u16(vinput_mask, vreinterpretq_u16_f16(vi), vspecial_value);
float16x8_t vy = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx), vpostscale));
vy = vbslq_f16(vspecial_mask, vreinterpretq_f16_u16(vspecial_value), vy);
float16x4_t vy_lo = vget_low_f16(vy);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 7,784 | 45.616766 | 103 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsqrt/gen/f16-vsqrt-neonfp16arith-nr1fma1adj-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsqrt/neonfp16arith-nr1fma1adj.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsqrt_ukernel__neonfp16arith_nr1fma1adj_x24(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16x8_t vpositive_infinity = vmovq_n_u16(UINT16_C(0x7C00));
const float16x8_t vhalf = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3800))); // 0.5h
const uint16x8_t vexp4_mask = vmovq_n_u16(UINT16_C(0x7800));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vbslq_f16(vexp4_mask, vhalf, vi0);
const int16x8_t vexp4i0 = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi0), vexp4_mask));
const float16x8_t vx1 = vbslq_f16(vexp4_mask, vhalf, vi1);
const int16x8_t vexp4i1 = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi1), vexp4_mask));
const float16x8_t vx2 = vbslq_f16(vexp4_mask, vhalf, vi2);
const int16x8_t vexp4i2 = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi2), vexp4_mask));
const float16x8_t vrsqrtx0 = vrsqrteq_f16(vx0);
const int16x8_t vpostscale0 = vhsubq_s16(vexp4i0, vreinterpretq_s16_f16(vhalf));
const float16x8_t vrsqrtx1 = vrsqrteq_f16(vx1);
const int16x8_t vpostscale1 = vhsubq_s16(vexp4i1, vreinterpretq_s16_f16(vhalf));
const float16x8_t vrsqrtx2 = vrsqrteq_f16(vx2);
const int16x8_t vpostscale2 = vhsubq_s16(vexp4i2, vreinterpretq_s16_f16(vhalf));
float16x8_t vsqrtx0 = vmulq_f16(vrsqrtx0, vx0);
const float16x8_t vhalfrsqrtx0 = vmulq_f16(vrsqrtx0, vhalf);
uint16x8_t vspecial_mask0 = vcgeq_u16(vreinterpretq_u16_f16(vi0), vpositive_infinity);
float16x8_t vsqrtx1 = vmulq_f16(vrsqrtx1, vx1);
const float16x8_t vhalfrsqrtx1 = vmulq_f16(vrsqrtx1, vhalf);
uint16x8_t vspecial_mask1 = vcgeq_u16(vreinterpretq_u16_f16(vi1), vpositive_infinity);
float16x8_t vsqrtx2 = vmulq_f16(vrsqrtx2, vx2);
const float16x8_t vhalfrsqrtx2 = vmulq_f16(vrsqrtx2, vhalf);
uint16x8_t vspecial_mask2 = vcgeq_u16(vreinterpretq_u16_f16(vi2), vpositive_infinity);
const float16x8_t vresidual0 = vfmsq_f16(vhalf, vsqrtx0, vhalfrsqrtx0);
const uint16x8_t vzero_mask0 = vceqq_f16(vi0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value0 = vmovq_n_u16(UINT16_C(0x7E00));
const float16x8_t vresidual1 = vfmsq_f16(vhalf, vsqrtx1, vhalfrsqrtx1);
const uint16x8_t vzero_mask1 = vceqq_f16(vi1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value1 = vmovq_n_u16(UINT16_C(0x7E00));
const float16x8_t vresidual2 = vfmsq_f16(vhalf, vsqrtx2, vhalfrsqrtx2);
const uint16x8_t vzero_mask2 = vceqq_f16(vi2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value2 = vmovq_n_u16(UINT16_C(0x7E00));
vsqrtx0 = vfmaq_f16(vsqrtx0, vresidual0, vsqrtx0);
vspecial_mask0 = vorrq_u16(vspecial_mask0, vzero_mask0);
const uint16x8_t vinfinity_mask0 = vceqq_u16(vreinterpretq_u16_f16(vi0), vpositive_infinity);
vsqrtx1 = vfmaq_f16(vsqrtx1, vresidual1, vsqrtx1);
vspecial_mask1 = vorrq_u16(vspecial_mask1, vzero_mask1);
const uint16x8_t vinfinity_mask1 = vceqq_u16(vreinterpretq_u16_f16(vi1), vpositive_infinity);
vsqrtx2 = vfmaq_f16(vsqrtx2, vresidual2, vsqrtx2);
vspecial_mask2 = vorrq_u16(vspecial_mask2, vzero_mask2);
const uint16x8_t vinfinity_mask2 = vceqq_u16(vreinterpretq_u16_f16(vi2), vpositive_infinity);
const float16x8_t vadjustment0 = vfmsq_f16(vx0, vsqrtx0, vsqrtx0);
const uint16x8_t vinput_mask0 = vorrq_u16(vinfinity_mask0, vzero_mask0);
const float16x8_t vadjustment1 = vfmsq_f16(vx1, vsqrtx1, vsqrtx1);
const uint16x8_t vinput_mask1 = vorrq_u16(vinfinity_mask1, vzero_mask1);
const float16x8_t vadjustment2 = vfmsq_f16(vx2, vsqrtx2, vsqrtx2);
const uint16x8_t vinput_mask2 = vorrq_u16(vinfinity_mask2, vzero_mask2);
vsqrtx0 = vfmaq_f16(vsqrtx0, vhalfrsqrtx0, vadjustment0);
vspecial_value0 = vbslq_u16(vinput_mask0, vreinterpretq_u16_f16(vi0), vspecial_value0);
vsqrtx1 = vfmaq_f16(vsqrtx1, vhalfrsqrtx1, vadjustment1);
vspecial_value1 = vbslq_u16(vinput_mask1, vreinterpretq_u16_f16(vi1), vspecial_value1);
vsqrtx2 = vfmaq_f16(vsqrtx2, vhalfrsqrtx2, vadjustment2);
vspecial_value2 = vbslq_u16(vinput_mask2, vreinterpretq_u16_f16(vi2), vspecial_value2);
float16x8_t vy0 = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx0), vpostscale0));
float16x8_t vy1 = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx1), vpostscale1));
float16x8_t vy2 = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx2), vpostscale2));
vy0 = vbslq_f16(vspecial_mask0, vreinterpretq_f16_u16(vspecial_value0), vy0);
vy1 = vbslq_f16(vspecial_mask1, vreinterpretq_f16_u16(vspecial_value1), vy1);
vy2 = vbslq_f16(vspecial_mask2, vreinterpretq_f16_u16(vspecial_value2), vy2);
vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vbslq_f16(vexp4_mask, vhalf, vi);
const int16x8_t vexp4i = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi), vexp4_mask));
const float16x8_t vrsqrtx = vrsqrteq_f16(vx);
const int16x8_t vpostscale = vhsubq_s16(vexp4i, vreinterpretq_s16_f16(vhalf));
float16x8_t vsqrtx = vmulq_f16(vrsqrtx, vx);
const float16x8_t vhalfrsqrtx = vmulq_f16(vrsqrtx, vhalf);
uint16x8_t vspecial_mask = vcgeq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vresidual = vfmsq_f16(vhalf, vsqrtx, vhalfrsqrtx);
const uint16x8_t vzero_mask = vceqq_f16(vi, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value = vmovq_n_u16(UINT16_C(0x7E00));
vsqrtx = vfmaq_f16(vsqrtx, vresidual, vsqrtx);
vspecial_mask = vorrq_u16(vspecial_mask, vzero_mask);
const uint16x8_t vinfinity_mask = vceqq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vadjustment = vfmsq_f16(vx, vsqrtx, vsqrtx);
const uint16x8_t vinput_mask = vorrq_u16(vinfinity_mask, vzero_mask);
vsqrtx = vfmaq_f16(vsqrtx, vhalfrsqrtx, vadjustment);
vspecial_value = vbslq_u16(vinput_mask, vreinterpretq_u16_f16(vi), vspecial_value);
float16x8_t vy = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx), vpostscale));
vy = vbslq_f16(vspecial_mask, vreinterpretq_f16_u16(vspecial_value), vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vbslq_f16(vexp4_mask, vhalf, vi);
const int16x8_t vexp4i = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi), vexp4_mask));
const float16x8_t vrsqrtx = vrsqrteq_f16(vx);
const int16x8_t vpostscale = vhsubq_s16(vexp4i, vreinterpretq_s16_f16(vhalf));
float16x8_t vsqrtx = vmulq_f16(vrsqrtx, vx);
const float16x8_t vhalfrsqrtx = vmulq_f16(vrsqrtx, vhalf);
uint16x8_t vspecial_mask = vcgeq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vresidual = vfmsq_f16(vhalf, vsqrtx, vhalfrsqrtx);
const uint16x8_t vzero_mask = vceqq_f16(vi, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value = vmovq_n_u16(UINT16_C(0x7E00));
vsqrtx = vfmaq_f16(vsqrtx, vresidual, vsqrtx);
vspecial_mask = vorrq_u16(vspecial_mask, vzero_mask);
const uint16x8_t vinfinity_mask = vceqq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vadjustment = vfmsq_f16(vx, vsqrtx, vsqrtx);
const uint16x8_t vinput_mask = vorrq_u16(vinfinity_mask, vzero_mask);
vsqrtx = vfmaq_f16(vsqrtx, vhalfrsqrtx, vadjustment);
vspecial_value = vbslq_u16(vinput_mask, vreinterpretq_u16_f16(vi), vspecial_value);
float16x8_t vy = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx), vpostscale));
vy = vbslq_f16(vspecial_mask, vreinterpretq_f16_u16(vspecial_value), vy);
float16x4_t vy_lo = vget_low_f16(vy);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 9,346 | 48.718085 | 103 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsqrt/gen/f16-vsqrt-neonfp16arith-nr1fma1adj-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsqrt/neonfp16arith-nr1fma1adj.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsqrt_ukernel__neonfp16arith_nr1fma1adj_x32(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16x8_t vpositive_infinity = vmovq_n_u16(UINT16_C(0x7C00));
const float16x8_t vhalf = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3800))); // 0.5h
const uint16x8_t vexp4_mask = vmovq_n_u16(UINT16_C(0x7800));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vbslq_f16(vexp4_mask, vhalf, vi0);
const int16x8_t vexp4i0 = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi0), vexp4_mask));
const float16x8_t vx1 = vbslq_f16(vexp4_mask, vhalf, vi1);
const int16x8_t vexp4i1 = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi1), vexp4_mask));
const float16x8_t vx2 = vbslq_f16(vexp4_mask, vhalf, vi2);
const int16x8_t vexp4i2 = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi2), vexp4_mask));
const float16x8_t vx3 = vbslq_f16(vexp4_mask, vhalf, vi3);
const int16x8_t vexp4i3 = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi3), vexp4_mask));
const float16x8_t vrsqrtx0 = vrsqrteq_f16(vx0);
const int16x8_t vpostscale0 = vhsubq_s16(vexp4i0, vreinterpretq_s16_f16(vhalf));
const float16x8_t vrsqrtx1 = vrsqrteq_f16(vx1);
const int16x8_t vpostscale1 = vhsubq_s16(vexp4i1, vreinterpretq_s16_f16(vhalf));
const float16x8_t vrsqrtx2 = vrsqrteq_f16(vx2);
const int16x8_t vpostscale2 = vhsubq_s16(vexp4i2, vreinterpretq_s16_f16(vhalf));
const float16x8_t vrsqrtx3 = vrsqrteq_f16(vx3);
const int16x8_t vpostscale3 = vhsubq_s16(vexp4i3, vreinterpretq_s16_f16(vhalf));
float16x8_t vsqrtx0 = vmulq_f16(vrsqrtx0, vx0);
const float16x8_t vhalfrsqrtx0 = vmulq_f16(vrsqrtx0, vhalf);
uint16x8_t vspecial_mask0 = vcgeq_u16(vreinterpretq_u16_f16(vi0), vpositive_infinity);
float16x8_t vsqrtx1 = vmulq_f16(vrsqrtx1, vx1);
const float16x8_t vhalfrsqrtx1 = vmulq_f16(vrsqrtx1, vhalf);
uint16x8_t vspecial_mask1 = vcgeq_u16(vreinterpretq_u16_f16(vi1), vpositive_infinity);
float16x8_t vsqrtx2 = vmulq_f16(vrsqrtx2, vx2);
const float16x8_t vhalfrsqrtx2 = vmulq_f16(vrsqrtx2, vhalf);
uint16x8_t vspecial_mask2 = vcgeq_u16(vreinterpretq_u16_f16(vi2), vpositive_infinity);
float16x8_t vsqrtx3 = vmulq_f16(vrsqrtx3, vx3);
const float16x8_t vhalfrsqrtx3 = vmulq_f16(vrsqrtx3, vhalf);
uint16x8_t vspecial_mask3 = vcgeq_u16(vreinterpretq_u16_f16(vi3), vpositive_infinity);
const float16x8_t vresidual0 = vfmsq_f16(vhalf, vsqrtx0, vhalfrsqrtx0);
const uint16x8_t vzero_mask0 = vceqq_f16(vi0, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value0 = vmovq_n_u16(UINT16_C(0x7E00));
const float16x8_t vresidual1 = vfmsq_f16(vhalf, vsqrtx1, vhalfrsqrtx1);
const uint16x8_t vzero_mask1 = vceqq_f16(vi1, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value1 = vmovq_n_u16(UINT16_C(0x7E00));
const float16x8_t vresidual2 = vfmsq_f16(vhalf, vsqrtx2, vhalfrsqrtx2);
const uint16x8_t vzero_mask2 = vceqq_f16(vi2, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value2 = vmovq_n_u16(UINT16_C(0x7E00));
const float16x8_t vresidual3 = vfmsq_f16(vhalf, vsqrtx3, vhalfrsqrtx3);
const uint16x8_t vzero_mask3 = vceqq_f16(vi3, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value3 = vmovq_n_u16(UINT16_C(0x7E00));
vsqrtx0 = vfmaq_f16(vsqrtx0, vresidual0, vsqrtx0);
vspecial_mask0 = vorrq_u16(vspecial_mask0, vzero_mask0);
const uint16x8_t vinfinity_mask0 = vceqq_u16(vreinterpretq_u16_f16(vi0), vpositive_infinity);
vsqrtx1 = vfmaq_f16(vsqrtx1, vresidual1, vsqrtx1);
vspecial_mask1 = vorrq_u16(vspecial_mask1, vzero_mask1);
const uint16x8_t vinfinity_mask1 = vceqq_u16(vreinterpretq_u16_f16(vi1), vpositive_infinity);
vsqrtx2 = vfmaq_f16(vsqrtx2, vresidual2, vsqrtx2);
vspecial_mask2 = vorrq_u16(vspecial_mask2, vzero_mask2);
const uint16x8_t vinfinity_mask2 = vceqq_u16(vreinterpretq_u16_f16(vi2), vpositive_infinity);
vsqrtx3 = vfmaq_f16(vsqrtx3, vresidual3, vsqrtx3);
vspecial_mask3 = vorrq_u16(vspecial_mask3, vzero_mask3);
const uint16x8_t vinfinity_mask3 = vceqq_u16(vreinterpretq_u16_f16(vi3), vpositive_infinity);
const float16x8_t vadjustment0 = vfmsq_f16(vx0, vsqrtx0, vsqrtx0);
const uint16x8_t vinput_mask0 = vorrq_u16(vinfinity_mask0, vzero_mask0);
const float16x8_t vadjustment1 = vfmsq_f16(vx1, vsqrtx1, vsqrtx1);
const uint16x8_t vinput_mask1 = vorrq_u16(vinfinity_mask1, vzero_mask1);
const float16x8_t vadjustment2 = vfmsq_f16(vx2, vsqrtx2, vsqrtx2);
const uint16x8_t vinput_mask2 = vorrq_u16(vinfinity_mask2, vzero_mask2);
const float16x8_t vadjustment3 = vfmsq_f16(vx3, vsqrtx3, vsqrtx3);
const uint16x8_t vinput_mask3 = vorrq_u16(vinfinity_mask3, vzero_mask3);
vsqrtx0 = vfmaq_f16(vsqrtx0, vhalfrsqrtx0, vadjustment0);
vspecial_value0 = vbslq_u16(vinput_mask0, vreinterpretq_u16_f16(vi0), vspecial_value0);
vsqrtx1 = vfmaq_f16(vsqrtx1, vhalfrsqrtx1, vadjustment1);
vspecial_value1 = vbslq_u16(vinput_mask1, vreinterpretq_u16_f16(vi1), vspecial_value1);
vsqrtx2 = vfmaq_f16(vsqrtx2, vhalfrsqrtx2, vadjustment2);
vspecial_value2 = vbslq_u16(vinput_mask2, vreinterpretq_u16_f16(vi2), vspecial_value2);
vsqrtx3 = vfmaq_f16(vsqrtx3, vhalfrsqrtx3, vadjustment3);
vspecial_value3 = vbslq_u16(vinput_mask3, vreinterpretq_u16_f16(vi3), vspecial_value3);
float16x8_t vy0 = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx0), vpostscale0));
float16x8_t vy1 = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx1), vpostscale1));
float16x8_t vy2 = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx2), vpostscale2));
float16x8_t vy3 = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx3), vpostscale3));
vy0 = vbslq_f16(vspecial_mask0, vreinterpretq_f16_u16(vspecial_value0), vy0);
vy1 = vbslq_f16(vspecial_mask1, vreinterpretq_f16_u16(vspecial_value1), vy1);
vy2 = vbslq_f16(vspecial_mask2, vreinterpretq_f16_u16(vspecial_value2), vy2);
vy3 = vbslq_f16(vspecial_mask3, vreinterpretq_f16_u16(vspecial_value3), vy3);
vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vbslq_f16(vexp4_mask, vhalf, vi);
const int16x8_t vexp4i = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi), vexp4_mask));
const float16x8_t vrsqrtx = vrsqrteq_f16(vx);
const int16x8_t vpostscale = vhsubq_s16(vexp4i, vreinterpretq_s16_f16(vhalf));
float16x8_t vsqrtx = vmulq_f16(vrsqrtx, vx);
const float16x8_t vhalfrsqrtx = vmulq_f16(vrsqrtx, vhalf);
uint16x8_t vspecial_mask = vcgeq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vresidual = vfmsq_f16(vhalf, vsqrtx, vhalfrsqrtx);
const uint16x8_t vzero_mask = vceqq_f16(vi, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value = vmovq_n_u16(UINT16_C(0x7E00));
vsqrtx = vfmaq_f16(vsqrtx, vresidual, vsqrtx);
vspecial_mask = vorrq_u16(vspecial_mask, vzero_mask);
const uint16x8_t vinfinity_mask = vceqq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vadjustment = vfmsq_f16(vx, vsqrtx, vsqrtx);
const uint16x8_t vinput_mask = vorrq_u16(vinfinity_mask, vzero_mask);
vsqrtx = vfmaq_f16(vsqrtx, vhalfrsqrtx, vadjustment);
vspecial_value = vbslq_u16(vinput_mask, vreinterpretq_u16_f16(vi), vspecial_value);
float16x8_t vy = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx), vpostscale));
vy = vbslq_f16(vspecial_mask, vreinterpretq_f16_u16(vspecial_value), vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vbslq_f16(vexp4_mask, vhalf, vi);
const int16x8_t vexp4i = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi), vexp4_mask));
const float16x8_t vrsqrtx = vrsqrteq_f16(vx);
const int16x8_t vpostscale = vhsubq_s16(vexp4i, vreinterpretq_s16_f16(vhalf));
float16x8_t vsqrtx = vmulq_f16(vrsqrtx, vx);
const float16x8_t vhalfrsqrtx = vmulq_f16(vrsqrtx, vhalf);
uint16x8_t vspecial_mask = vcgeq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vresidual = vfmsq_f16(vhalf, vsqrtx, vhalfrsqrtx);
const uint16x8_t vzero_mask = vceqq_f16(vi, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value = vmovq_n_u16(UINT16_C(0x7E00));
vsqrtx = vfmaq_f16(vsqrtx, vresidual, vsqrtx);
vspecial_mask = vorrq_u16(vspecial_mask, vzero_mask);
const uint16x8_t vinfinity_mask = vceqq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vadjustment = vfmsq_f16(vx, vsqrtx, vsqrtx);
const uint16x8_t vinput_mask = vorrq_u16(vinfinity_mask, vzero_mask);
vsqrtx = vfmaq_f16(vsqrtx, vhalfrsqrtx, vadjustment);
vspecial_value = vbslq_u16(vinput_mask, vreinterpretq_u16_f16(vi), vspecial_value);
float16x8_t vy = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx), vpostscale));
vy = vbslq_f16(vspecial_mask, vreinterpretq_f16_u16(vspecial_value), vy);
float16x4_t vy_lo = vget_low_f16(vy);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 10,908 | 51.196172 | 103 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vsqrt/gen/f16-vsqrt-neonfp16arith-nr1fma1adj-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vsqrt/neonfp16arith-nr1fma1adj.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vsqrt_ukernel__neonfp16arith_nr1fma1adj_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16x8_t vpositive_infinity = vmovq_n_u16(UINT16_C(0x7C00));
const float16x8_t vhalf = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x3800))); // 0.5h
const uint16x8_t vexp4_mask = vmovq_n_u16(UINT16_C(0x7800));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vbslq_f16(vexp4_mask, vhalf, vi);
const int16x8_t vexp4i = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi), vexp4_mask));
const float16x8_t vrsqrtx = vrsqrteq_f16(vx);
const int16x8_t vpostscale = vhsubq_s16(vexp4i, vreinterpretq_s16_f16(vhalf));
float16x8_t vsqrtx = vmulq_f16(vrsqrtx, vx);
const float16x8_t vhalfrsqrtx = vmulq_f16(vrsqrtx, vhalf);
uint16x8_t vspecial_mask = vcgeq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vresidual = vfmsq_f16(vhalf, vsqrtx, vhalfrsqrtx);
const uint16x8_t vzero_mask = vceqq_f16(vi, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value = vmovq_n_u16(UINT16_C(0x7E00));
vsqrtx = vfmaq_f16(vsqrtx, vresidual, vsqrtx);
vspecial_mask = vorrq_u16(vspecial_mask, vzero_mask);
const uint16x8_t vinfinity_mask = vceqq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vadjustment = vfmsq_f16(vx, vsqrtx, vsqrtx);
const uint16x8_t vinput_mask = vorrq_u16(vinfinity_mask, vzero_mask);
vsqrtx = vfmaq_f16(vsqrtx, vhalfrsqrtx, vadjustment);
vspecial_value = vbslq_u16(vinput_mask, vreinterpretq_u16_f16(vi), vspecial_value);
float16x8_t vy = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx), vpostscale));
vy = vbslq_f16(vspecial_mask, vreinterpretq_f16_u16(vspecial_value), vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vbslq_f16(vexp4_mask, vhalf, vi);
const int16x8_t vexp4i = vreinterpretq_s16_u16(vandq_u16(vreinterpretq_u16_f16(vi), vexp4_mask));
const float16x8_t vrsqrtx = vrsqrteq_f16(vx);
const int16x8_t vpostscale = vhsubq_s16(vexp4i, vreinterpretq_s16_f16(vhalf));
float16x8_t vsqrtx = vmulq_f16(vrsqrtx, vx);
const float16x8_t vhalfrsqrtx = vmulq_f16(vrsqrtx, vhalf);
uint16x8_t vspecial_mask = vcgeq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vresidual = vfmsq_f16(vhalf, vsqrtx, vhalfrsqrtx);
const uint16x8_t vzero_mask = vceqq_f16(vi, vreinterpretq_f16_u16(vmovq_n_u16(0)));
uint16x8_t vspecial_value = vmovq_n_u16(UINT16_C(0x7E00));
vsqrtx = vfmaq_f16(vsqrtx, vresidual, vsqrtx);
vspecial_mask = vorrq_u16(vspecial_mask, vzero_mask);
const uint16x8_t vinfinity_mask = vceqq_u16(vreinterpretq_u16_f16(vi), vpositive_infinity);
const float16x8_t vadjustment = vfmsq_f16(vx, vsqrtx, vsqrtx);
const uint16x8_t vinput_mask = vorrq_u16(vinfinity_mask, vzero_mask);
vsqrtx = vfmaq_f16(vsqrtx, vhalfrsqrtx, vadjustment);
vspecial_value = vbslq_u16(vinput_mask, vreinterpretq_u16_f16(vi), vspecial_value);
float16x8_t vy = vreinterpretq_f16_s16(vaddq_s16(vreinterpretq_s16_f16(vsqrtx), vpostscale));
vy = vbslq_f16(vspecial_mask, vreinterpretq_f16_u16(vspecial_value), vy);
float16x4_t vy_lo = vget_low_f16(vy);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 4,570 | 39.451327 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-aarch64-neonfp16arith-expm1minus-rr1-p3h2ts-div-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
void xnn_f16_vtanh_ukernel__aarch64_neonfp16arith_expm1minus_rr1_p3h2ts_div_x16(
size_t n,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(n != 0);
assert(n % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));
const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
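  // tanh(x) is evaluated as sign(x) * tanh(|x|) using the expm1-minus formulation:
  //   z = min(|x|, sat_cutoff), n = z * (-log2(e)) rounded to a multiple of 1/2,
  //   s = 2**(2*n) (low bits of n shifted into the exponent field), t = z + n * ln(2),
  //   emo ~= expm1(-2*z) = (s - 1) - (2 - c2*t - c3*t^2) * t * s,
  //   epo = emo + 2 ~= exp(-2*z) + 1,
  // so emo / epo ~= -tanh(|x|); the final bit-select copies the sign bit of x back in.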
for (; n >= 2 * sizeof(float16x8_t); n -= 2 * sizeof(float16x8_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz0 = vabsq_f16(vx0);
float16x8_t vz1 = vabsq_f16(vx1);
vz0 = vminq_f16(vz0, vsat_cutoff);
vz1 = vminq_f16(vz1, vsat_cutoff);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
vn1 = vsubq_f16(vn1, vmagic_bias);
const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
vp0 = vfmsq_f16(vtwo, vp0, vt0);
vp1 = vfmsq_f16(vtwo, vp1, vt1);
const float16x8_t vts0 = vmulq_f16(vt0, vs0);
const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
const float16x8_t vts1 = vmulq_f16(vt1, vs1);
const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
float16x8_t vy0 = vdivq_f16(vemo0, vepo0);
float16x8_t vy1 = vdivq_f16(vemo1, vepo1);
vy0 = vbslq_f16(vsign_mask, vx0, vy0);
vy1 = vbslq_f16(vsign_mask, vx1, vy1);
vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
}
for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
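  // Remainder of 1..7 halfwords: load a full vector (out-of-bounds reads are
  // allowed per XNN_OOB_READS), compute as above, and store the valid lanes in
  // 4-, 2- and 1-element pieces.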
if (n != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
float16x4_t vy_lo = vget_low_f16(vy);
if (n & 4 * sizeof(uint16_t)) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (n & 1 * sizeof(uint16_t)) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 5,464 | 36.951389 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-aarch64-neonfp16arith-expm1minus-rr1-p3h2ts-div-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
void xnn_f16_vtanh_ukernel__aarch64_neonfp16arith_expm1minus_rr1_p3h2ts_div_x24(
size_t n,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(n != 0);
assert(n % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));
const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
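  // Main loop: 3 vectors (24 halfwords) per iteration; leftover full vectors are
  // handled by the single-vector loop below, and a final partial vector by the
  // tail-store block at the end.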
for (; n >= 3 * sizeof(float16x8_t); n -= 3 * sizeof(float16x8_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz0 = vabsq_f16(vx0);
float16x8_t vz1 = vabsq_f16(vx1);
float16x8_t vz2 = vabsq_f16(vx2);
vz0 = vminq_f16(vz0, vsat_cutoff);
vz1 = vminq_f16(vz1, vsat_cutoff);
vz2 = vminq_f16(vz2, vsat_cutoff);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
vn1 = vsubq_f16(vn1, vmagic_bias);
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
vn2 = vsubq_f16(vn2, vmagic_bias);
const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
vp0 = vfmsq_f16(vtwo, vp0, vt0);
vp1 = vfmsq_f16(vtwo, vp1, vt1);
vp2 = vfmsq_f16(vtwo, vp2, vt2);
const float16x8_t vts0 = vmulq_f16(vt0, vs0);
const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
const float16x8_t vts1 = vmulq_f16(vt1, vs1);
const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
const float16x8_t vts2 = vmulq_f16(vt2, vs2);
const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
float16x8_t vy0 = vdivq_f16(vemo0, vepo0);
float16x8_t vy1 = vdivq_f16(vemo1, vepo1);
float16x8_t vy2 = vdivq_f16(vemo2, vepo2);
vy0 = vbslq_f16(vsign_mask, vx0, vy0);
vy1 = vbslq_f16(vsign_mask, vx1, vy1);
vy2 = vbslq_f16(vsign_mask, vx2, vy2);
vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
}
for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if (n != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
float16x4_t vy_lo = vget_low_f16(vy);
if (n & 4 * sizeof(uint16_t)) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (n & 1 * sizeof(uint16_t)) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 6,319 | 38.5 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-aarch64-neonfp16arith-expm1minus-rr1-p3h2ts-div-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
void xnn_f16_vtanh_ukernel__aarch64_neonfp16arith_expm1minus_rr1_p3h2ts_div_x32(
size_t n,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(n != 0);
assert(n % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));
const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; n >= 4 * sizeof(float16x8_t); n -= 4 * sizeof(float16x8_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz0 = vabsq_f16(vx0);
float16x8_t vz1 = vabsq_f16(vx1);
float16x8_t vz2 = vabsq_f16(vx2);
float16x8_t vz3 = vabsq_f16(vx3);
vz0 = vminq_f16(vz0, vsat_cutoff);
vz1 = vminq_f16(vz1, vsat_cutoff);
vz2 = vminq_f16(vz2, vsat_cutoff);
vz3 = vminq_f16(vz3, vsat_cutoff);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
vn1 = vsubq_f16(vn1, vmagic_bias);
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
vn2 = vsubq_f16(vn2, vmagic_bias);
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
vn3 = vsubq_f16(vn3, vmagic_bias);
const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2);
float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3);
vp0 = vfmsq_f16(vtwo, vp0, vt0);
vp1 = vfmsq_f16(vtwo, vp1, vt1);
vp2 = vfmsq_f16(vtwo, vp2, vt2);
vp3 = vfmsq_f16(vtwo, vp3, vt3);
const float16x8_t vts0 = vmulq_f16(vt0, vs0);
const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
const float16x8_t vts1 = vmulq_f16(vt1, vs1);
const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
const float16x8_t vts2 = vmulq_f16(vt2, vs2);
const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
const float16x8_t vts3 = vmulq_f16(vt3, vs3);
const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one);
const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3);
const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo);
float16x8_t vy0 = vdivq_f16(vemo0, vepo0);
float16x8_t vy1 = vdivq_f16(vemo1, vepo1);
float16x8_t vy2 = vdivq_f16(vemo2, vepo2);
float16x8_t vy3 = vdivq_f16(vemo3, vepo3);
vy0 = vbslq_f16(vsign_mask, vx0, vy0);
vy1 = vbslq_f16(vsign_mask, vx1, vy1);
vy2 = vbslq_f16(vsign_mask, vx2, vy2);
vy3 = vbslq_f16(vsign_mask, vx3, vy3);
vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
}
for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if (n != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
float16x4_t vy_lo = vget_low_f16(vy);
if (n & 4 * sizeof(uint16_t)) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (n & 1 * sizeof(uint16_t)) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 7,174 | 39.767045 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-aarch64-neonfp16arith-expm1minus-rr1-p3h2ts-div-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
void xnn_f16_vtanh_ukernel__aarch64_neonfp16arith_expm1minus_rr1_p3h2ts_div_x40(
size_t n,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(n != 0);
assert(n % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));
const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; n >= 5 * sizeof(float16x8_t); n -= 5 * sizeof(float16x8_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz0 = vabsq_f16(vx0);
float16x8_t vz1 = vabsq_f16(vx1);
float16x8_t vz2 = vabsq_f16(vx2);
float16x8_t vz3 = vabsq_f16(vx3);
float16x8_t vz4 = vabsq_f16(vx4);
vz0 = vminq_f16(vz0, vsat_cutoff);
vz1 = vminq_f16(vz1, vsat_cutoff);
vz2 = vminq_f16(vz2, vsat_cutoff);
vz3 = vminq_f16(vz3, vsat_cutoff);
vz4 = vminq_f16(vz4, vsat_cutoff);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
vn1 = vsubq_f16(vn1, vmagic_bias);
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
vn2 = vsubq_f16(vn2, vmagic_bias);
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
vn3 = vsubq_f16(vn3, vmagic_bias);
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
vn4 = vsubq_f16(vn4, vmagic_bias);
const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2);
const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2);
float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3);
float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4);
vp0 = vfmsq_f16(vtwo, vp0, vt0);
vp1 = vfmsq_f16(vtwo, vp1, vt1);
vp2 = vfmsq_f16(vtwo, vp2, vt2);
vp3 = vfmsq_f16(vtwo, vp3, vt3);
vp4 = vfmsq_f16(vtwo, vp4, vt4);
const float16x8_t vts0 = vmulq_f16(vt0, vs0);
const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
const float16x8_t vts1 = vmulq_f16(vt1, vs1);
const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
const float16x8_t vts2 = vmulq_f16(vt2, vs2);
const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
const float16x8_t vts3 = vmulq_f16(vt3, vs3);
const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one);
const float16x8_t vts4 = vmulq_f16(vt4, vs4);
const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one);
const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3);
const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4);
const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo);
const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo);
float16x8_t vy0 = vdivq_f16(vemo0, vepo0);
float16x8_t vy1 = vdivq_f16(vemo1, vepo1);
float16x8_t vy2 = vdivq_f16(vemo2, vepo2);
float16x8_t vy3 = vdivq_f16(vemo3, vepo3);
float16x8_t vy4 = vdivq_f16(vemo4, vepo4);
vy0 = vbslq_f16(vsign_mask, vx0, vy0);
vy1 = vbslq_f16(vsign_mask, vx1, vy1);
vy2 = vbslq_f16(vsign_mask, vx2, vy2);
vy3 = vbslq_f16(vsign_mask, vx3, vy3);
vy4 = vbslq_f16(vsign_mask, vx4, vy4);
vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8;
}
for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if (n != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
float16x4_t vy_lo = vget_low_f16(vy);
if (n & 4 * sizeof(uint16_t)) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (n & 1 * sizeof(uint16_t)) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 8,029 | 40.822917 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-aarch64-neonfp16arith-expm1minus-rr1-p3h2ts-div-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
void xnn_f16_vtanh_ukernel__aarch64_neonfp16arith_expm1minus_rr1_p3h2ts_div_x48(
size_t n,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(n != 0);
assert(n % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));
const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; n >= 6 * sizeof(float16x8_t); n -= 6 * sizeof(float16x8_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz0 = vabsq_f16(vx0);
float16x8_t vz1 = vabsq_f16(vx1);
float16x8_t vz2 = vabsq_f16(vx2);
float16x8_t vz3 = vabsq_f16(vx3);
float16x8_t vz4 = vabsq_f16(vx4);
float16x8_t vz5 = vabsq_f16(vx5);
vz0 = vminq_f16(vz0, vsat_cutoff);
vz1 = vminq_f16(vz1, vsat_cutoff);
vz2 = vminq_f16(vz2, vsat_cutoff);
vz3 = vminq_f16(vz3, vsat_cutoff);
vz4 = vminq_f16(vz4, vsat_cutoff);
vz5 = vminq_f16(vz5, vsat_cutoff);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
vn1 = vsubq_f16(vn1, vmagic_bias);
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
vn2 = vsubq_f16(vn2, vmagic_bias);
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
vn3 = vsubq_f16(vn3, vmagic_bias);
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
vn4 = vsubq_f16(vn4, vmagic_bias);
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
vn5 = vsubq_f16(vn5, vmagic_bias);
const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2);
const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2);
const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2);
float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3);
float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4);
float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5);
vp0 = vfmsq_f16(vtwo, vp0, vt0);
vp1 = vfmsq_f16(vtwo, vp1, vt1);
vp2 = vfmsq_f16(vtwo, vp2, vt2);
vp3 = vfmsq_f16(vtwo, vp3, vt3);
vp4 = vfmsq_f16(vtwo, vp4, vt4);
vp5 = vfmsq_f16(vtwo, vp5, vt5);
const float16x8_t vts0 = vmulq_f16(vt0, vs0);
const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
const float16x8_t vts1 = vmulq_f16(vt1, vs1);
const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
const float16x8_t vts2 = vmulq_f16(vt2, vs2);
const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
const float16x8_t vts3 = vmulq_f16(vt3, vs3);
const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one);
const float16x8_t vts4 = vmulq_f16(vt4, vs4);
const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one);
const float16x8_t vts5 = vmulq_f16(vt5, vs5);
const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one);
const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3);
const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4);
const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5);
const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo);
const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo);
const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo);
float16x8_t vy0 = vdivq_f16(vemo0, vepo0);
float16x8_t vy1 = vdivq_f16(vemo1, vepo1);
float16x8_t vy2 = vdivq_f16(vemo2, vepo2);
float16x8_t vy3 = vdivq_f16(vemo3, vepo3);
float16x8_t vy4 = vdivq_f16(vemo4, vepo4);
float16x8_t vy5 = vdivq_f16(vemo5, vepo5);
vy0 = vbslq_f16(vsign_mask, vx0, vy0);
vy1 = vbslq_f16(vsign_mask, vx1, vy1);
vy2 = vbslq_f16(vsign_mask, vx2, vy2);
vy3 = vbslq_f16(vsign_mask, vx3, vy3);
vy4 = vbslq_f16(vsign_mask, vx4, vy4);
vy5 = vbslq_f16(vsign_mask, vx5, vy5);
vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8;
}
for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if (n != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
float16x4_t vy_lo = vget_low_f16(vy);
if (n & 4 * sizeof(uint16_t)) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (n & 1 * sizeof(uint16_t)) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 8,884 | 41.716346 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-aarch64-neonfp16arith-expm1minus-rr1-p3h2ts-div-x56.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
void xnn_f16_vtanh_ukernel__aarch64_neonfp16arith_expm1minus_rr1_p3h2ts_div_x56(
size_t n,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(n != 0);
assert(n % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));
const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; n >= 7 * sizeof(float16x8_t); n -= 7 * sizeof(float16x8_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz0 = vabsq_f16(vx0);
float16x8_t vz1 = vabsq_f16(vx1);
float16x8_t vz2 = vabsq_f16(vx2);
float16x8_t vz3 = vabsq_f16(vx3);
float16x8_t vz4 = vabsq_f16(vx4);
float16x8_t vz5 = vabsq_f16(vx5);
float16x8_t vz6 = vabsq_f16(vx6);
vz0 = vminq_f16(vz0, vsat_cutoff);
vz1 = vminq_f16(vz1, vsat_cutoff);
vz2 = vminq_f16(vz2, vsat_cutoff);
vz3 = vminq_f16(vz3, vsat_cutoff);
vz4 = vminq_f16(vz4, vsat_cutoff);
vz5 = vminq_f16(vz5, vsat_cutoff);
vz6 = vminq_f16(vz6, vsat_cutoff);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
vn1 = vsubq_f16(vn1, vmagic_bias);
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
vn2 = vsubq_f16(vn2, vmagic_bias);
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
vn3 = vsubq_f16(vn3, vmagic_bias);
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
vn4 = vsubq_f16(vn4, vmagic_bias);
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
vn5 = vsubq_f16(vn5, vmagic_bias);
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
vn6 = vsubq_f16(vn6, vmagic_bias);
const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2);
const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2);
const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2);
const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2);
float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3);
float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4);
float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5);
float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6);
vp0 = vfmsq_f16(vtwo, vp0, vt0);
vp1 = vfmsq_f16(vtwo, vp1, vt1);
vp2 = vfmsq_f16(vtwo, vp2, vt2);
vp3 = vfmsq_f16(vtwo, vp3, vt3);
vp4 = vfmsq_f16(vtwo, vp4, vt4);
vp5 = vfmsq_f16(vtwo, vp5, vt5);
vp6 = vfmsq_f16(vtwo, vp6, vt6);
const float16x8_t vts0 = vmulq_f16(vt0, vs0);
const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
const float16x8_t vts1 = vmulq_f16(vt1, vs1);
const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
const float16x8_t vts2 = vmulq_f16(vt2, vs2);
const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
const float16x8_t vts3 = vmulq_f16(vt3, vs3);
const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one);
const float16x8_t vts4 = vmulq_f16(vt4, vs4);
const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one);
const float16x8_t vts5 = vmulq_f16(vt5, vs5);
const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one);
const float16x8_t vts6 = vmulq_f16(vt6, vs6);
const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one);
const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3);
const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4);
const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5);
const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6);
const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo);
const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo);
const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo);
const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo);
float16x8_t vy0 = vdivq_f16(vemo0, vepo0);
float16x8_t vy1 = vdivq_f16(vemo1, vepo1);
float16x8_t vy2 = vdivq_f16(vemo2, vepo2);
float16x8_t vy3 = vdivq_f16(vemo3, vepo3);
float16x8_t vy4 = vdivq_f16(vemo4, vepo4);
float16x8_t vy5 = vdivq_f16(vemo5, vepo5);
float16x8_t vy6 = vdivq_f16(vemo6, vepo6);
vy0 = vbslq_f16(vsign_mask, vx0, vy0);
vy1 = vbslq_f16(vsign_mask, vx1, vy1);
vy2 = vbslq_f16(vsign_mask, vx2, vy2);
vy3 = vbslq_f16(vsign_mask, vx3, vy3);
vy4 = vbslq_f16(vsign_mask, vx4, vy4);
vy5 = vbslq_f16(vsign_mask, vx5, vy5);
vy6 = vbslq_f16(vsign_mask, vx6, vy6);
vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8;
}
for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if (n != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
float16x4_t vy_lo = vget_low_f16(vy);
if (n & 4 * sizeof(uint16_t)) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (n & 1 * sizeof(uint16_t)) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 9,739 | 42.482143 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-aarch64-neonfp16arith-expm1minus-rr1-p3h2ts-div-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
void xnn_f16_vtanh_ukernel__aarch64_neonfp16arith_expm1minus_rr1_p3h2ts_div_x64(
size_t n,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(n != 0);
assert(n % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));
const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; n >= 8 * sizeof(float16x8_t); n -= 8 * sizeof(float16x8_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz0 = vabsq_f16(vx0);
float16x8_t vz1 = vabsq_f16(vx1);
float16x8_t vz2 = vabsq_f16(vx2);
float16x8_t vz3 = vabsq_f16(vx3);
float16x8_t vz4 = vabsq_f16(vx4);
float16x8_t vz5 = vabsq_f16(vx5);
float16x8_t vz6 = vabsq_f16(vx6);
float16x8_t vz7 = vabsq_f16(vx7);
vz0 = vminq_f16(vz0, vsat_cutoff);
vz1 = vminq_f16(vz1, vsat_cutoff);
vz2 = vminq_f16(vz2, vsat_cutoff);
vz3 = vminq_f16(vz3, vsat_cutoff);
vz4 = vminq_f16(vz4, vsat_cutoff);
vz5 = vminq_f16(vz5, vsat_cutoff);
vz6 = vminq_f16(vz6, vsat_cutoff);
vz7 = vminq_f16(vz7, vsat_cutoff);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
vn1 = vsubq_f16(vn1, vmagic_bias);
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
vn2 = vsubq_f16(vn2, vmagic_bias);
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
vn3 = vsubq_f16(vn3, vmagic_bias);
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
vn4 = vsubq_f16(vn4, vmagic_bias);
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
vn5 = vsubq_f16(vn5, vmagic_bias);
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
vn6 = vsubq_f16(vn6, vmagic_bias);
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
vn7 = vsubq_f16(vn7, vmagic_bias);
const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2);
const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2);
const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2);
const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2);
const float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2);
float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3);
float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4);
float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5);
float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6);
float16x8_t vp7 = vfmaq_f16(vc2, vc3, vt7);
vp0 = vfmsq_f16(vtwo, vp0, vt0);
vp1 = vfmsq_f16(vtwo, vp1, vt1);
vp2 = vfmsq_f16(vtwo, vp2, vt2);
vp3 = vfmsq_f16(vtwo, vp3, vt3);
vp4 = vfmsq_f16(vtwo, vp4, vt4);
vp5 = vfmsq_f16(vtwo, vp5, vt5);
vp6 = vfmsq_f16(vtwo, vp6, vt6);
vp7 = vfmsq_f16(vtwo, vp7, vt7);
const float16x8_t vts0 = vmulq_f16(vt0, vs0);
const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
const float16x8_t vts1 = vmulq_f16(vt1, vs1);
const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
const float16x8_t vts2 = vmulq_f16(vt2, vs2);
const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
const float16x8_t vts3 = vmulq_f16(vt3, vs3);
const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one);
const float16x8_t vts4 = vmulq_f16(vt4, vs4);
const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one);
const float16x8_t vts5 = vmulq_f16(vt5, vs5);
const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one);
const float16x8_t vts6 = vmulq_f16(vt6, vs6);
const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one);
const float16x8_t vts7 = vmulq_f16(vt7, vs7);
const float16x8_t vsmo7 = vaddq_f16(vs7, vminus_one);
const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3);
const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4);
const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5);
const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6);
const float16x8_t vemo7 = vfmsq_f16(vsmo7, vp7, vts7);
const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo);
const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo);
const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo);
const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo);
const float16x8_t vepo7 = vaddq_f16(vemo7, vtwo);
float16x8_t vy0 = vdivq_f16(vemo0, vepo0);
float16x8_t vy1 = vdivq_f16(vemo1, vepo1);
float16x8_t vy2 = vdivq_f16(vemo2, vepo2);
float16x8_t vy3 = vdivq_f16(vemo3, vepo3);
float16x8_t vy4 = vdivq_f16(vemo4, vepo4);
float16x8_t vy5 = vdivq_f16(vemo5, vepo5);
float16x8_t vy6 = vdivq_f16(vemo6, vepo6);
float16x8_t vy7 = vdivq_f16(vemo7, vepo7);
vy0 = vbslq_f16(vsign_mask, vx0, vy0);
vy1 = vbslq_f16(vsign_mask, vx1, vy1);
vy2 = vbslq_f16(vsign_mask, vx2, vy2);
vy3 = vbslq_f16(vsign_mask, vx3, vy3);
vy4 = vbslq_f16(vsign_mask, vx4, vy4);
vy5 = vbslq_f16(vsign_mask, vx5, vy5);
vy6 = vbslq_f16(vsign_mask, vx6, vy6);
vy7 = vbslq_f16(vsign_mask, vx7, vy7);
vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy7)); o += 8;
}
for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if (n != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
float16x4_t vy_lo = vget_low_f16(vy);
if (n & 4 * sizeof(uint16_t)) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (n & 1 * sizeof(uint16_t)) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 10,594 | 43.145833 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-aarch64-neonfp16arith-expm1minus-rr1-p3h2ts-div-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
void xnn_f16_vtanh_ukernel__aarch64_neonfp16arith_expm1minus_rr1_p3h2ts_div_x72(
size_t n,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(n != 0);
assert(n % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));
const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; n >= 9 * sizeof(float16x8_t); n -= 9 * sizeof(float16x8_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz0 = vabsq_f16(vx0);
float16x8_t vz1 = vabsq_f16(vx1);
float16x8_t vz2 = vabsq_f16(vx2);
float16x8_t vz3 = vabsq_f16(vx3);
float16x8_t vz4 = vabsq_f16(vx4);
float16x8_t vz5 = vabsq_f16(vx5);
float16x8_t vz6 = vabsq_f16(vx6);
float16x8_t vz7 = vabsq_f16(vx7);
float16x8_t vz8 = vabsq_f16(vx8);
vz0 = vminq_f16(vz0, vsat_cutoff);
vz1 = vminq_f16(vz1, vsat_cutoff);
vz2 = vminq_f16(vz2, vsat_cutoff);
vz3 = vminq_f16(vz3, vsat_cutoff);
vz4 = vminq_f16(vz4, vsat_cutoff);
vz5 = vminq_f16(vz5, vsat_cutoff);
vz6 = vminq_f16(vz6, vsat_cutoff);
vz7 = vminq_f16(vz7, vsat_cutoff);
vz8 = vminq_f16(vz8, vsat_cutoff);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e);
float16x8_t vn8 = vfmaq_f16(vmagic_bias, vz8, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
vn1 = vsubq_f16(vn1, vmagic_bias);
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
vn2 = vsubq_f16(vn2, vmagic_bias);
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
vn3 = vsubq_f16(vn3, vmagic_bias);
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
vn4 = vsubq_f16(vn4, vmagic_bias);
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
vn5 = vsubq_f16(vn5, vmagic_bias);
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
vn6 = vsubq_f16(vn6, vmagic_bias);
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
vn7 = vsubq_f16(vn7, vmagic_bias);
const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
vn8 = vsubq_f16(vn8, vmagic_bias);
const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2);
const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2);
const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2);
const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2);
const float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2);
const float16x8_t vt8 = vfmaq_f16(vz8, vn8, vln2);
float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3);
float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4);
float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5);
float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6);
float16x8_t vp7 = vfmaq_f16(vc2, vc3, vt7);
float16x8_t vp8 = vfmaq_f16(vc2, vc3, vt8);
vp0 = vfmsq_f16(vtwo, vp0, vt0);
vp1 = vfmsq_f16(vtwo, vp1, vt1);
vp2 = vfmsq_f16(vtwo, vp2, vt2);
vp3 = vfmsq_f16(vtwo, vp3, vt3);
vp4 = vfmsq_f16(vtwo, vp4, vt4);
vp5 = vfmsq_f16(vtwo, vp5, vt5);
vp6 = vfmsq_f16(vtwo, vp6, vt6);
vp7 = vfmsq_f16(vtwo, vp7, vt7);
vp8 = vfmsq_f16(vtwo, vp8, vt8);
const float16x8_t vts0 = vmulq_f16(vt0, vs0);
const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
const float16x8_t vts1 = vmulq_f16(vt1, vs1);
const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
const float16x8_t vts2 = vmulq_f16(vt2, vs2);
const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
const float16x8_t vts3 = vmulq_f16(vt3, vs3);
const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one);
const float16x8_t vts4 = vmulq_f16(vt4, vs4);
const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one);
const float16x8_t vts5 = vmulq_f16(vt5, vs5);
const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one);
const float16x8_t vts6 = vmulq_f16(vt6, vs6);
const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one);
const float16x8_t vts7 = vmulq_f16(vt7, vs7);
const float16x8_t vsmo7 = vaddq_f16(vs7, vminus_one);
const float16x8_t vts8 = vmulq_f16(vt8, vs8);
const float16x8_t vsmo8 = vaddq_f16(vs8, vminus_one);
const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3);
const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4);
const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5);
const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6);
const float16x8_t vemo7 = vfmsq_f16(vsmo7, vp7, vts7);
const float16x8_t vemo8 = vfmsq_f16(vsmo8, vp8, vts8);
const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo);
const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo);
const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo);
const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo);
const float16x8_t vepo7 = vaddq_f16(vemo7, vtwo);
const float16x8_t vepo8 = vaddq_f16(vemo8, vtwo);
float16x8_t vy0 = vdivq_f16(vemo0, vepo0);
float16x8_t vy1 = vdivq_f16(vemo1, vepo1);
float16x8_t vy2 = vdivq_f16(vemo2, vepo2);
float16x8_t vy3 = vdivq_f16(vemo3, vepo3);
float16x8_t vy4 = vdivq_f16(vemo4, vepo4);
float16x8_t vy5 = vdivq_f16(vemo5, vepo5);
float16x8_t vy6 = vdivq_f16(vemo6, vepo6);
float16x8_t vy7 = vdivq_f16(vemo7, vepo7);
float16x8_t vy8 = vdivq_f16(vemo8, vepo8);
vy0 = vbslq_f16(vsign_mask, vx0, vy0);
vy1 = vbslq_f16(vsign_mask, vx1, vy1);
vy2 = vbslq_f16(vsign_mask, vx2, vy2);
vy3 = vbslq_f16(vsign_mask, vx3, vy3);
vy4 = vbslq_f16(vsign_mask, vx4, vy4);
vy5 = vbslq_f16(vsign_mask, vx5, vy5);
vy6 = vbslq_f16(vsign_mask, vx6, vy6);
vy7 = vbslq_f16(vsign_mask, vx7, vy7);
vy8 = vbslq_f16(vsign_mask, vx8, vy8);
vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy7)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy8)); o += 8;
}
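  // Handle any full 8-element vectors left over after the unrolled main loop.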
for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
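  // Final 1-7 elements: the full 8-element load below relies on XNN_OOB_READS;
  // the result is then stored in 4-, 2- and 1-element pieces as selected by n.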
if (n != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
float16x4_t vy_lo = vget_low_f16(vy);
if (n & 4 * sizeof(uint16_t)) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (n & 1 * sizeof(uint16_t)) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 11,449 | 43.726563 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-aarch64-neonfp16arith-expm1minus-rr1-p3h2ts-div-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
void xnn_f16_vtanh_ukernel__aarch64_neonfp16arith_expm1minus_rr1_p3h2ts_div_x8(
size_t n,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(n != 0);
assert(n % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));
const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
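  // Algorithm sketch, as read from the generated code (the constants above are
  // fp16 bit patterns whose exact values come from the kernel template and are
  // not restated here):
  //   z   = min(|x|, sat_cutoff)        clamp so the result saturates to +/-1
  //   n   = round(z * minus_log2e)      via the magic-bias addition
  //   s   ~ 2**n                        low bits of n shifted into the exponent field
  //   t   = z + n * ln2                 reduced argument
  //   p   = two - (c2 + c3 * t) * t     degree-3 polynomial ("p3h2" variant)
  //   emo = (s - 1) - p * (t * s)       ~ expm1(-2 * |x|)
  //   y   = emo / (emo + 2)             = tanh(-|x|)
  // and the sign bit of x is finally copied back into y with a bit-select.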
for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if (n != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
float16x4_t vy_lo = vget_low_f16(vy);
if (n & 4 * sizeof(uint16_t)) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (n & 1 * sizeof(uint16_t)) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 3,670 | 34.990196 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-aarch64-neonfp16arith-expm1minus-rr1-p3h2ts-div-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
void xnn_f16_vtanh_ukernel__aarch64_neonfp16arith_expm1minus_rr1_p3h2ts_div_x80(
size_t n,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(n != 0);
assert(n % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));
const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
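  // Main loop: 10 vectors of 8 halfwords (80 elements) per iteration. The ten
  // independent chains are interleaved, presumably to hide FMA and division
  // latency; the per-element math is identical to the x8 variant of this kernel.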
for (; n >= 10 * sizeof(float16x8_t); n -= 10 * sizeof(float16x8_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx9 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz0 = vabsq_f16(vx0);
float16x8_t vz1 = vabsq_f16(vx1);
float16x8_t vz2 = vabsq_f16(vx2);
float16x8_t vz3 = vabsq_f16(vx3);
float16x8_t vz4 = vabsq_f16(vx4);
float16x8_t vz5 = vabsq_f16(vx5);
float16x8_t vz6 = vabsq_f16(vx6);
float16x8_t vz7 = vabsq_f16(vx7);
float16x8_t vz8 = vabsq_f16(vx8);
float16x8_t vz9 = vabsq_f16(vx9);
vz0 = vminq_f16(vz0, vsat_cutoff);
vz1 = vminq_f16(vz1, vsat_cutoff);
vz2 = vminq_f16(vz2, vsat_cutoff);
vz3 = vminq_f16(vz3, vsat_cutoff);
vz4 = vminq_f16(vz4, vsat_cutoff);
vz5 = vminq_f16(vz5, vsat_cutoff);
vz6 = vminq_f16(vz6, vsat_cutoff);
vz7 = vminq_f16(vz7, vsat_cutoff);
vz8 = vminq_f16(vz8, vsat_cutoff);
vz9 = vminq_f16(vz9, vsat_cutoff);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e);
float16x8_t vn8 = vfmaq_f16(vmagic_bias, vz8, vminus_log2e);
float16x8_t vn9 = vfmaq_f16(vmagic_bias, vz9, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
vn1 = vsubq_f16(vn1, vmagic_bias);
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
vn2 = vsubq_f16(vn2, vmagic_bias);
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
vn3 = vsubq_f16(vn3, vmagic_bias);
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
vn4 = vsubq_f16(vn4, vmagic_bias);
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
vn5 = vsubq_f16(vn5, vmagic_bias);
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
vn6 = vsubq_f16(vn6, vmagic_bias);
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
vn7 = vsubq_f16(vn7, vmagic_bias);
const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
vn8 = vsubq_f16(vn8, vmagic_bias);
const float16x8_t vs9 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn9), 10));
vn9 = vsubq_f16(vn9, vmagic_bias);
const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2);
const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2);
const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2);
const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2);
const float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2);
const float16x8_t vt8 = vfmaq_f16(vz8, vn8, vln2);
const float16x8_t vt9 = vfmaq_f16(vz9, vn9, vln2);
float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3);
float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4);
float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5);
float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6);
float16x8_t vp7 = vfmaq_f16(vc2, vc3, vt7);
float16x8_t vp8 = vfmaq_f16(vc2, vc3, vt8);
float16x8_t vp9 = vfmaq_f16(vc2, vc3, vt9);
vp0 = vfmsq_f16(vtwo, vp0, vt0);
vp1 = vfmsq_f16(vtwo, vp1, vt1);
vp2 = vfmsq_f16(vtwo, vp2, vt2);
vp3 = vfmsq_f16(vtwo, vp3, vt3);
vp4 = vfmsq_f16(vtwo, vp4, vt4);
vp5 = vfmsq_f16(vtwo, vp5, vt5);
vp6 = vfmsq_f16(vtwo, vp6, vt6);
vp7 = vfmsq_f16(vtwo, vp7, vt7);
vp8 = vfmsq_f16(vtwo, vp8, vt8);
vp9 = vfmsq_f16(vtwo, vp9, vt9);
const float16x8_t vts0 = vmulq_f16(vt0, vs0);
const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
const float16x8_t vts1 = vmulq_f16(vt1, vs1);
const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
const float16x8_t vts2 = vmulq_f16(vt2, vs2);
const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
const float16x8_t vts3 = vmulq_f16(vt3, vs3);
const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one);
const float16x8_t vts4 = vmulq_f16(vt4, vs4);
const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one);
const float16x8_t vts5 = vmulq_f16(vt5, vs5);
const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one);
const float16x8_t vts6 = vmulq_f16(vt6, vs6);
const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one);
const float16x8_t vts7 = vmulq_f16(vt7, vs7);
const float16x8_t vsmo7 = vaddq_f16(vs7, vminus_one);
const float16x8_t vts8 = vmulq_f16(vt8, vs8);
const float16x8_t vsmo8 = vaddq_f16(vs8, vminus_one);
const float16x8_t vts9 = vmulq_f16(vt9, vs9);
const float16x8_t vsmo9 = vaddq_f16(vs9, vminus_one);
const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3);
const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4);
const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5);
const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6);
const float16x8_t vemo7 = vfmsq_f16(vsmo7, vp7, vts7);
const float16x8_t vemo8 = vfmsq_f16(vsmo8, vp8, vts8);
const float16x8_t vemo9 = vfmsq_f16(vsmo9, vp9, vts9);
const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo);
const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo);
const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo);
const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo);
const float16x8_t vepo7 = vaddq_f16(vemo7, vtwo);
const float16x8_t vepo8 = vaddq_f16(vemo8, vtwo);
const float16x8_t vepo9 = vaddq_f16(vemo9, vtwo);
float16x8_t vy0 = vdivq_f16(vemo0, vepo0);
float16x8_t vy1 = vdivq_f16(vemo1, vepo1);
float16x8_t vy2 = vdivq_f16(vemo2, vepo2);
float16x8_t vy3 = vdivq_f16(vemo3, vepo3);
float16x8_t vy4 = vdivq_f16(vemo4, vepo4);
float16x8_t vy5 = vdivq_f16(vemo5, vepo5);
float16x8_t vy6 = vdivq_f16(vemo6, vepo6);
float16x8_t vy7 = vdivq_f16(vemo7, vepo7);
float16x8_t vy8 = vdivq_f16(vemo8, vepo8);
float16x8_t vy9 = vdivq_f16(vemo9, vepo9);
vy0 = vbslq_f16(vsign_mask, vx0, vy0);
vy1 = vbslq_f16(vsign_mask, vx1, vy1);
vy2 = vbslq_f16(vsign_mask, vx2, vy2);
vy3 = vbslq_f16(vsign_mask, vx3, vy3);
vy4 = vbslq_f16(vsign_mask, vx4, vy4);
vy5 = vbslq_f16(vsign_mask, vx5, vy5);
vy6 = vbslq_f16(vsign_mask, vx6, vy6);
vy7 = vbslq_f16(vsign_mask, vx7, vy7);
vy8 = vbslq_f16(vsign_mask, vx8, vy8);
vy9 = vbslq_f16(vsign_mask, vx9, vy9);
vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy7)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy8)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy9)); o += 8;
}
for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if (n != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
float16x4_t vy_lo = vget_low_f16(vy);
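    // Store the remaining 1-7 halfwords in 4-, 2- and 1-element pieces selected
    // by the low bits of n, advancing vy_lo past the lanes already written.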
if (n & 4 * sizeof(uint16_t)) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (n & 1 * sizeof(uint16_t)) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 12,306 | 44.246324 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-div-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_div_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
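  // Same expm1minus-based tanh approximation as the NEONFP16ARITH kernels, but
  // evaluated in single precision: each group of 8 half-precision inputs is
  // widened with the F16C conversion, processed with AVX2/FMA arithmetic, and
  // narrowed back to fp16 at the end. The input sign is stripped up front and
  // re-applied to the fp16 result with integer bit tricks on the raw halfwords
  // (see the vsign_mask / vinvsignx handling below).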
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
i += 16;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
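    // OR-ing the fp16 sign bit into each halfword forces every lane negative, so
    // vz* = -|x*| after conversion to fp32; vinvsignx* records which lanes were
    // flipped (originally non-negative inputs) so the final XOR can restore the
    // sign of tanh(x).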
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
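    // Store the final 1-7 halfwords: write 4, then 2, then 1 lanes depending on
    // the remaining batch bits, moving the already-written lanes out of vh in
    // between.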
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 6,355 | 33.356757 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-div-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_div_x24(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
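  // Same computation as the x16 variant, unrolled to 3 vectors (24 halfwords)
  // per main-loop iteration; the 8-element and partial-tail paths below are
  // identical to the narrower variants.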
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
i += 24;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
o += 24;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 7,412 | 35.160976 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-div-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_div_x32(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
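  // Same computation as the x16/x24 variants, unrolled to 4 vectors
  // (32 halfwords) per main-loop iteration.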
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24));
i += 32;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
__m256 vz3 = _mm256_cvtph_ps(vabsx3);
const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
__m256 vp3 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
vh3 = _mm_xor_si128(vh3, vinvsignx3);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
o += 32;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 8,469 | 36.644444 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-div-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_div_x40(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
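  // Same computation as the narrower AVX2 variants, unrolled to 5 vectors
  // (40 halfwords) per main-loop iteration.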
for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24));
const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32));
i += 40;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask);
const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
__m256 vz3 = _mm256_cvtph_ps(vabsx3);
const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3);
__m256 vz4 = _mm256_cvtph_ps(vabsx4);
const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
__m256 vp3 = vc3;
__m256 vp4 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
vh3 = _mm_xor_si128(vh3, vinvsignx3);
vh4 = _mm_xor_si128(vh4, vinvsignx4);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
_mm_storeu_si128((__m128i*) (o + 32), vh4);
o += 40;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 9,526 | 37.885714 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-div-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_div_x48(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
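  // Same computation as the narrower AVX2 variants, unrolled to 6 vectors
  // (48 halfwords) per main-loop iteration.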
for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24));
const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32));
const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40));
i += 48;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask);
const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask);
const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
__m256 vz3 = _mm256_cvtph_ps(vabsx3);
const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3);
__m256 vz4 = _mm256_cvtph_ps(vabsx4);
const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4);
__m256 vz5 = _mm256_cvtph_ps(vabsx5);
const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
__m256 vp3 = vc3;
__m256 vp4 = vc3;
__m256 vp5 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
vh3 = _mm_xor_si128(vh3, vinvsignx3);
vh4 = _mm_xor_si128(vh4, vinvsignx4);
vh5 = _mm_xor_si128(vh5, vinvsignx5);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
_mm_storeu_si128((__m128i*) (o + 32), vh4);
_mm_storeu_si128((__m128i*) (o + 40), vh5);
o += 48;
}
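  // Any leftover elements are handled below: first in full groups of 8
  // half-precision values, then as a single partial vector in the final block.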
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
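  // Partial tail of 1-7 elements. A full 16-byte vector is still loaded and
  // processed (these kernels are declared XNN_OOB_READS, so reading slightly
  // past the end of the input is accepted), but only the low 4/2/1 lanes are
  // stored.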
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 10583 | 38.939623 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-div-x56.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_div_x56(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
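  // Main loop: 7 vectors of 8 half-precision elements (56 per iteration).
  // The computation follows the expm1minus formulation of tanh. The sign bit
  // of x is forced on with vsign_mask, giving z = -|x|, and the original sign
  // is remembered in vinvsignx. z is clamped at the saturation cutoff, n is
  // extracted from z*vlog2e + vmagic_bias with the magic-bias rounding trick,
  // and shifting its low mantissa bits left by 23 produces the scale s = 2^n.
  // With the reduced argument t = n*vminus_ln2 + z, the Horner polynomial
  // p = (c3*t + c2)*t + 2 reconstructs e = p*(t*s) + (s - 1), an expm1-style
  // quantity, and the quotient e / (e + 2) is the tanh of the clamped z under
  // this formulation. The sign is restored at the end by XOR-ing the converted
  // half-precision result with vinvsignx.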
for (; batch >= 56 * sizeof(uint16_t); batch -= 56 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24));
const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32));
const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40));
const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48));
i += 56;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask);
const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask);
const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask);
const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
__m256 vz3 = _mm256_cvtph_ps(vabsx3);
const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3);
__m256 vz4 = _mm256_cvtph_ps(vabsx4);
const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4);
__m256 vz5 = _mm256_cvtph_ps(vabsx5);
const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5);
__m256 vz6 = _mm256_cvtph_ps(vabsx6);
const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
__m256 vp3 = vc3;
__m256 vp4 = vc3;
__m256 vp5 = vc3;
__m256 vp6 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
__m256 vy6 = _mm256_div_ps(vemo6, vepo6);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT);
__m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
vh3 = _mm_xor_si128(vh3, vinvsignx3);
vh4 = _mm_xor_si128(vh4, vinvsignx4);
vh5 = _mm_xor_si128(vh5, vinvsignx5);
vh6 = _mm_xor_si128(vh6, vinvsignx6);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
_mm_storeu_si128((__m128i*) (o + 32), vh4);
_mm_storeu_si128((__m128i*) (o + 40), vh5);
_mm_storeu_si128((__m128i*) (o + 48), vh6);
o += 56;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 11640 | 39.845614 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-div-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_div_x64(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
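  // Same expm1minus-based tanh evaluation as the narrower variants of this
  // template, unrolled to 8 vectors of 8 half-precision elements (64 per
  // iteration).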
for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24));
const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32));
const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40));
const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48));
const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56));
i += 64;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask);
const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask);
const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask);
const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask);
const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
__m256 vz3 = _mm256_cvtph_ps(vabsx3);
const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3);
__m256 vz4 = _mm256_cvtph_ps(vabsx4);
const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4);
__m256 vz5 = _mm256_cvtph_ps(vabsx5);
const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5);
__m256 vz6 = _mm256_cvtph_ps(vabsx6);
const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6);
__m256 vz7 = _mm256_cvtph_ps(vabsx7);
const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
__m256 vp3 = vc3;
__m256 vp4 = vc3;
__m256 vp5 = vc3;
__m256 vp6 = vc3;
__m256 vp7 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
__m256 vy6 = _mm256_div_ps(vemo6, vepo6);
__m256 vy7 = _mm256_div_ps(vemo7, vepo7);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT);
__m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT);
__m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
vh3 = _mm_xor_si128(vh3, vinvsignx3);
vh4 = _mm_xor_si128(vh4, vinvsignx4);
vh5 = _mm_xor_si128(vh5, vinvsignx5);
vh6 = _mm_xor_si128(vh6, vinvsignx6);
vh7 = _mm_xor_si128(vh7, vinvsignx7);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
_mm_storeu_si128((__m128i*) (o + 32), vh4);
_mm_storeu_si128((__m128i*) (o + 40), vh5);
_mm_storeu_si128((__m128i*) (o + 48), vh6);
_mm_storeu_si128((__m128i*) (o + 56), vh7);
o += 64;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 12697 | 40.632787 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-div-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_div_x72(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
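  // Main loop: 9 vectors of 8 half-precision elements (72 per iteration);
  // otherwise identical to the other unroll factors of this template.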
for (; batch >= 72 * sizeof(uint16_t); batch -= 72 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24));
const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32));
const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40));
const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48));
const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56));
const __m128i vx8 = _mm_loadu_si128((const __m128i*) (i + 64));
i += 72;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask);
const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask);
const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask);
const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask);
const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask);
const __m128i vabsx8 = _mm_or_si128(vx8, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
__m256 vz3 = _mm256_cvtph_ps(vabsx3);
const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3);
__m256 vz4 = _mm256_cvtph_ps(vabsx4);
const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4);
__m256 vz5 = _mm256_cvtph_ps(vabsx5);
const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5);
__m256 vz6 = _mm256_cvtph_ps(vabsx6);
const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6);
__m256 vz7 = _mm256_cvtph_ps(vabsx7);
const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7);
__m256 vz8 = _mm256_cvtph_ps(vabsx8);
const __m128i vinvsignx8 = _mm_xor_si128(vx8, vabsx8);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
vz8 = _mm256_max_ps(vsat_cutoff, vz8);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
__m256 vp3 = vc3;
__m256 vp4 = vc3;
__m256 vp5 = vc3;
__m256 vp6 = vc3;
__m256 vp7 = vc3;
__m256 vp8 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
__m256 vy6 = _mm256_div_ps(vemo6, vepo6);
__m256 vy7 = _mm256_div_ps(vemo7, vepo7);
__m256 vy8 = _mm256_div_ps(vemo8, vepo8);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT);
__m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT);
__m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT);
__m128i vh8 = _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
vh3 = _mm_xor_si128(vh3, vinvsignx3);
vh4 = _mm_xor_si128(vh4, vinvsignx4);
vh5 = _mm_xor_si128(vh5, vinvsignx5);
vh6 = _mm_xor_si128(vh6, vinvsignx6);
vh7 = _mm_xor_si128(vh7, vinvsignx7);
vh8 = _mm_xor_si128(vh8, vinvsignx8);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
_mm_storeu_si128((__m128i*) (o + 32), vh4);
_mm_storeu_si128((__m128i*) (o + 40), vh5);
_mm_storeu_si128((__m128i*) (o + 48), vh6);
_mm_storeu_si128((__m128i*) (o + 56), vh7);
_mm_storeu_si128((__m128i*) (o + 64), vh8);
o += 72;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 13754 | 41.323077 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-div-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_div_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
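  // No extra unrolling in this variant: each main-loop iteration converts,
  // evaluates and stores a single vector of 8 half-precision elements.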
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 4139 | 31.093023 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-div-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_div_x80(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
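  // Main loop: 10 vectors of 8 half-precision elements (80 per iteration),
  // processed with the same per-vector computation as the other variants.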
for (; batch >= 80 * sizeof(uint16_t); batch -= 80 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24));
const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32));
const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40));
const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48));
const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56));
const __m128i vx8 = _mm_loadu_si128((const __m128i*) (i + 64));
const __m128i vx9 = _mm_loadu_si128((const __m128i*) (i + 72));
i += 80;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask);
const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask);
const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask);
const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask);
const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask);
const __m128i vabsx8 = _mm_or_si128(vx8, vsign_mask);
const __m128i vabsx9 = _mm_or_si128(vx9, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
__m256 vz3 = _mm256_cvtph_ps(vabsx3);
const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3);
__m256 vz4 = _mm256_cvtph_ps(vabsx4);
const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4);
__m256 vz5 = _mm256_cvtph_ps(vabsx5);
const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5);
__m256 vz6 = _mm256_cvtph_ps(vabsx6);
const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6);
__m256 vz7 = _mm256_cvtph_ps(vabsx7);
const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7);
__m256 vz8 = _mm256_cvtph_ps(vabsx8);
const __m128i vinvsignx8 = _mm_xor_si128(vx8, vabsx8);
__m256 vz9 = _mm256_cvtph_ps(vabsx9);
const __m128i vinvsignx9 = _mm_xor_si128(vx9, vabsx9);
vz0 = _mm256_max_ps(vsat_cutoff, vz0);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
vz1 = _mm256_max_ps(vsat_cutoff, vz1);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
vz2 = _mm256_max_ps(vsat_cutoff, vz2);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
vz3 = _mm256_max_ps(vsat_cutoff, vz3);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
vz4 = _mm256_max_ps(vsat_cutoff, vz4);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
vz5 = _mm256_max_ps(vsat_cutoff, vz5);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
vz6 = _mm256_max_ps(vsat_cutoff, vz6);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
vz7 = _mm256_max_ps(vsat_cutoff, vz7);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
vz8 = _mm256_max_ps(vsat_cutoff, vz8);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
vz9 = _mm256_max_ps(vsat_cutoff, vz9);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
const __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
__m256 vp3 = vc3;
__m256 vp4 = vc3;
__m256 vp5 = vc3;
__m256 vp6 = vc3;
__m256 vp7 = vc3;
__m256 vp8 = vc3;
__m256 vp9 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
vp9 = _mm256_fmadd_ps(vp9, vt9, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vts9 = _mm256_mul_ps(vt9, vs9);
const __m256 vsmo9 = _mm256_add_ps(vs9, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vemo9 = _mm256_fmadd_ps(vp9, vts9, vsmo9);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
const __m256 vepo9 = _mm256_add_ps(vemo9, vtwo);
__m256 vy0 = _mm256_div_ps(vemo0, vepo0);
__m256 vy1 = _mm256_div_ps(vemo1, vepo1);
__m256 vy2 = _mm256_div_ps(vemo2, vepo2);
__m256 vy3 = _mm256_div_ps(vemo3, vepo3);
__m256 vy4 = _mm256_div_ps(vemo4, vepo4);
__m256 vy5 = _mm256_div_ps(vemo5, vepo5);
__m256 vy6 = _mm256_div_ps(vemo6, vepo6);
__m256 vy7 = _mm256_div_ps(vemo7, vepo7);
__m256 vy8 = _mm256_div_ps(vemo8, vepo8);
__m256 vy9 = _mm256_div_ps(vemo9, vepo9);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT);
__m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT);
__m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT);
__m128i vh8 = _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT);
__m128i vh9 = _mm256_cvtps_ph(vy9, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
vh3 = _mm_xor_si128(vh3, vinvsignx3);
vh4 = _mm_xor_si128(vh4, vinvsignx4);
vh5 = _mm_xor_si128(vh5, vinvsignx5);
vh6 = _mm_xor_si128(vh6, vinvsignx6);
vh7 = _mm_xor_si128(vh7, vinvsignx7);
vh8 = _mm_xor_si128(vh8, vinvsignx8);
vh9 = _mm_xor_si128(vh9, vinvsignx9);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
_mm_storeu_si128((__m128i*) (o + 32), vh4);
_mm_storeu_si128((__m128i*) (o + 40), vh5);
_mm_storeu_si128((__m128i*) (o + 48), vh6);
_mm_storeu_si128((__m128i*) (o + 56), vh7);
_mm_storeu_si128((__m128i*) (o + 64), vh8);
_mm_storeu_si128((__m128i*) (o + 72), vh9);
o += 80;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
vz = _mm256_max_ps(vsat_cutoff, vz);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vy = _mm256_div_ps(vemo, vepo);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 14811 | 41.933333 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-rcp-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_rcp_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
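  // Each iteration below evaluates tanh() on a block of half-precision inputs:
  //   1. Set the fp16 sign bit (vx | vsign_mask) so the value converted to fp32
  //      is z = -|x|; vinvsignx keeps the bits needed to restore the sign of the
  //      final result with a single XOR.
  //   2. Approximate expm1 of the (negative) scaled argument with the usual range
  //      reduction: vn is rounded via the magic-bias trick, vs = 2**n is rebuilt
  //      by shifting vn into the fp32 exponent field, vt is the reduced argument,
  //      and vp is a degree-3 polynomial in vt.
  //   3. vemo ~= expm1(...), vepo = vemo + 2, and tanh = vemo / vepo. This "rcp"
  //      variant approximates the division with _mm256_rcp_ps instead of a full
  //      divide; lanes at or below vsat_cutoff are forced to -1 via the vm mask.
  //   4. Convert back to fp16 and XOR in the saved sign bits.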
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
i += 16;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| file_length: 6,820 | avg_line_length: 33.80102 | max_line_length: 104 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-rcp-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_rcp_x24(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
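  // Same computation as the 16-halfword variant above, unrolled to 24 halfwords
  // (three 8-lane blocks) per main-loop iteration.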
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
i += 24;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
o += 24;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| file_length: 7,995 | avg_line_length: 35.678899 | max_line_length: 104 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-rcp-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_rcp_x32(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24));
i += 32;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
__m256 vz3 = _mm256_cvtph_ps(vabsx3);
const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
__m256 vp3 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
vh3 = _mm_xor_si128(vh3, vinvsignx3);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
o += 32;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| file_length: 9,170 | avg_line_length: 37.2125 | max_line_length: 104 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-rcp-x40.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_rcp_x40(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24));
const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32));
i += 40;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask);
const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
__m256 vz3 = _mm256_cvtph_ps(vabsx3);
const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3);
__m256 vz4 = _mm256_cvtph_ps(vabsx4);
const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
__m256 vp3 = vc3;
__m256 vp4 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
vh3 = _mm_xor_si128(vh3, vinvsignx3);
vh4 = _mm_xor_si128(vh4, vinvsignx4);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
_mm_storeu_si128((__m128i*) (o + 32), vh4);
o += 40;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| file_length: 10,345 | avg_line_length: 38.48855 | max_line_length: 104 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-rcp-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_rcp_x48(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24));
const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32));
const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40));
i += 48;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask);
const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask);
const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
__m256 vz3 = _mm256_cvtph_ps(vabsx3);
const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3);
__m256 vz4 = _mm256_cvtph_ps(vabsx4);
const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4);
__m256 vz5 = _mm256_cvtph_ps(vabsx5);
const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
__m256 vp3 = vc3;
__m256 vp4 = vc3;
__m256 vp5 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
vh3 = _mm_xor_si128(vh3, vinvsignx3);
vh4 = _mm_xor_si128(vh4, vinvsignx4);
vh5 = _mm_xor_si128(vh5, vinvsignx5);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
_mm_storeu_si128((__m128i*) (o + 32), vh4);
_mm_storeu_si128((__m128i*) (o + 40), vh5);
o += 48;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| file_length: 11,520 | avg_line_length: 39.566901 | max_line_length: 104 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-rcp-x56.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_rcp_x56(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 56 * sizeof(uint16_t); batch -= 56 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24));
const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32));
const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40));
const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48));
i += 56;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask);
const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask);
const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask);
const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
__m256 vz3 = _mm256_cvtph_ps(vabsx3);
const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3);
__m256 vz4 = _mm256_cvtph_ps(vabsx4);
const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4);
__m256 vz5 = _mm256_cvtph_ps(vabsx5);
const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5);
__m256 vz6 = _mm256_cvtph_ps(vabsx6);
const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
__m256 vp3 = vc3;
__m256 vp4 = vc3;
__m256 vp5 = vc3;
__m256 vp6 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT);
__m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
vh3 = _mm_xor_si128(vh3, vinvsignx3);
vh4 = _mm_xor_si128(vh4, vinvsignx4);
vh5 = _mm_xor_si128(vh5, vinvsignx5);
vh6 = _mm_xor_si128(vh6, vinvsignx6);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
_mm_storeu_si128((__m128i*) (o + 32), vh4);
_mm_storeu_si128((__m128i*) (o + 40), vh5);
_mm_storeu_si128((__m128i*) (o + 48), vh6);
o += 56;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| file_length: 12,695 | avg_line_length: 40.490196 | max_line_length: 104 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-rcp-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_rcp_x64(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24));
const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32));
const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40));
const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48));
const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56));
i += 64;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask);
const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask);
const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask);
const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask);
const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
__m256 vz3 = _mm256_cvtph_ps(vabsx3);
const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3);
__m256 vz4 = _mm256_cvtph_ps(vabsx4);
const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4);
__m256 vz5 = _mm256_cvtph_ps(vabsx5);
const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5);
__m256 vz6 = _mm256_cvtph_ps(vabsx6);
const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6);
__m256 vz7 = _mm256_cvtph_ps(vabsx7);
const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
__m256 vp3 = vc3;
__m256 vp4 = vc3;
__m256 vp5 = vc3;
__m256 vp6 = vc3;
__m256 vp7 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6);
vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT);
__m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT);
__m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
vh3 = _mm_xor_si128(vh3, vinvsignx3);
vh4 = _mm_xor_si128(vh4, vinvsignx4);
vh5 = _mm_xor_si128(vh5, vinvsignx5);
vh6 = _mm_xor_si128(vh6, vinvsignx6);
vh7 = _mm_xor_si128(vh7, vinvsignx7);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
_mm_storeu_si128((__m128i*) (o + 32), vh4);
_mm_storeu_si128((__m128i*) (o + 40), vh5);
_mm_storeu_si128((__m128i*) (o + 48), vh6);
_mm_storeu_si128((__m128i*) (o + 56), vh7);
o += 64;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| file_length: 13,870 | avg_line_length: 41.289634 | max_line_length: 104 | extension_type: c |

| repo: XNNPACK | file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-rcp-x72.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_rcp_x72(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 72 * sizeof(uint16_t); batch -= 72 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24));
const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32));
const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40));
const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48));
const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56));
const __m128i vx8 = _mm_loadu_si128((const __m128i*) (i + 64));
i += 72;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask);
const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask);
const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask);
const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask);
const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask);
const __m128i vabsx8 = _mm_or_si128(vx8, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
__m256 vz3 = _mm256_cvtph_ps(vabsx3);
const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3);
__m256 vz4 = _mm256_cvtph_ps(vabsx4);
const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4);
__m256 vz5 = _mm256_cvtph_ps(vabsx5);
const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5);
__m256 vz6 = _mm256_cvtph_ps(vabsx6);
const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6);
__m256 vz7 = _mm256_cvtph_ps(vabsx7);
const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7);
__m256 vz8 = _mm256_cvtph_ps(vabsx8);
const __m128i vinvsignx8 = _mm_xor_si128(vx8, vabsx8);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256 vm8 = _mm256_cmp_ps(vz8, vsat_cutoff, _CMP_LE_OS);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
__m256 vp3 = vc3;
__m256 vp4 = vc3;
__m256 vp5 = vc3;
__m256 vp6 = vc3;
__m256 vp7 = vc3;
__m256 vp8 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
__m256 vrepo8 = _mm256_rcp_ps(vepo8);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
__m256 vy8 = _mm256_mul_ps(vemo8, vrepo8);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6);
vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7);
vy8 = _mm256_blendv_ps(vy8, vminus_one, vm8);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT);
__m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT);
__m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT);
__m128i vh8 = _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
vh3 = _mm_xor_si128(vh3, vinvsignx3);
vh4 = _mm_xor_si128(vh4, vinvsignx4);
vh5 = _mm_xor_si128(vh5, vinvsignx5);
vh6 = _mm_xor_si128(vh6, vinvsignx6);
vh7 = _mm_xor_si128(vh7, vinvsignx7);
vh8 = _mm_xor_si128(vh8, vinvsignx8);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
_mm_storeu_si128((__m128i*) (o + 32), vh4);
_mm_storeu_si128((__m128i*) (o + 40), vh5);
_mm_storeu_si128((__m128i*) (o + 48), vh6);
_mm_storeu_si128((__m128i*) (o + 56), vh7);
_mm_storeu_si128((__m128i*) (o + 64), vh8);
o += 72;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| file_length: 15,045 | avg_line_length: 41.988571 | max_line_length: 104 | extension_type: c |

| repo: XNNPACK | file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-rcp-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_rcp_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
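    // Set the fp16 sign bit to form the negated magnitude -|x| (vabsx); vinvsignx records the bit that must be XORed back into the result to restore the sign of x.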
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
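    // Range reduction: the magic-bias trick extracts the integer part of z*log2e into the low bits of vn; shifting those bits into the float exponent field rebuilds the power-of-two scale vs, and vt is the reduced argument.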
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
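    // Degree-3 polynomial (c3*t + c2)*t + 2 in the reduced argument, evaluated by Horner's rule.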
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
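    // Reassemble the exponential: vemo = p*(t*s) + (s - 1) approximates expm1 of the scaled non-positive argument; vepo = vemo + 2 is then the matching exp(...) + 1 denominator.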
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
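    // Divide with the approximate reciprocal (_mm256_rcp_ps, relative error about 2**-12), without a Newton-Raphson step (the 'rcp' flavor of this kernel); the quotient is tanh(z) = -tanh(|x|).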
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
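    // Saturate: lanes with z <= sat_cutoff are forced to exactly -1, where tanh has already converged in fp16 precision.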
vy = _mm256_blendv_ps(vy, vminus_one, vm);
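    // Convert back to fp16 and XOR in vinvsignx to restore the sign of the original input.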
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
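  // Remainder of 1-7 elements: load a full 8-element vector (over-reading is allowed, see XNN_OOB_READS), compute as above, then store 4/2/1 lanes according to the low bits of batch.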
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
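/*
 * Illustrative note (not part of XNNPACK; the helper name below is only for this sketch):
 * the kernels above evaluate tanh(x) through the identity
 *   tanh(x) = sign(x) * tanh(|x|),  tanh(|x|) = -expm1(-2|x|) / (expm1(-2|x|) + 2),
 * so the exponential is only ever taken of a non-positive argument and cannot overflow.
 * A minimal scalar sketch of the same idea, using libm's expm1f in place of the
 * vectorized range reduction and polynomial:
 */
#include <math.h>

static inline float tanhf_via_expm1minus(float x) {
  const float z = -2.0f * fabsf(x);    // non-positive argument, so exp(z) is in (0, 1]
  const float emo = expm1f(z);         // e^z - 1, in (-1, 0]
  const float y = emo / (emo + 2.0f);  // (e^z - 1) / (e^z + 1) == tanh(z/2) == -tanh(|x|)
  return copysignf(-y, x);             // restore the sign of x
}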
| file_length: 4,367 | avg_line_length: 31.355556 | max_line_length: 104 | extension_type: c |

| repo: XNNPACK | file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-avx2-expm1minus-rr1-p3h2ts-rcp-x80.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vtanh/avx-expm1minus.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>
void xnn_f16_vtanh_ukernel__avx2_expm1minus_rr1_p3h2ts_rcp_x80(
size_t batch,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff);
const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2);
const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two);
const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 80 * sizeof(uint16_t); batch -= 80 * sizeof(uint16_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24));
const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32));
const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40));
const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48));
const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56));
const __m128i vx8 = _mm_loadu_si128((const __m128i*) (i + 64));
const __m128i vx9 = _mm_loadu_si128((const __m128i*) (i + 72));
i += 80;
const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask);
const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask);
const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask);
const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask);
const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask);
const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask);
const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask);
const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask);
const __m128i vabsx8 = _mm_or_si128(vx8, vsign_mask);
const __m128i vabsx9 = _mm_or_si128(vx9, vsign_mask);
__m256 vz0 = _mm256_cvtph_ps(vabsx0);
const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0);
__m256 vz1 = _mm256_cvtph_ps(vabsx1);
const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1);
__m256 vz2 = _mm256_cvtph_ps(vabsx2);
const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2);
__m256 vz3 = _mm256_cvtph_ps(vabsx3);
const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3);
__m256 vz4 = _mm256_cvtph_ps(vabsx4);
const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4);
__m256 vz5 = _mm256_cvtph_ps(vabsx5);
const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5);
__m256 vz6 = _mm256_cvtph_ps(vabsx6);
const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6);
__m256 vz7 = _mm256_cvtph_ps(vabsx7);
const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7);
__m256 vz8 = _mm256_cvtph_ps(vabsx8);
const __m128i vinvsignx8 = _mm_xor_si128(vx8, vabsx8);
__m256 vz9 = _mm256_cvtph_ps(vabsx9);
const __m128i vinvsignx9 = _mm_xor_si128(vx9, vabsx9);
const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256 vm8 = _mm256_cmp_ps(vz8, vsat_cutoff, _CMP_LE_OS);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
const __m256 vm9 = _mm256_cmp_ps(vz9, vsat_cutoff, _CMP_LE_OS);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
const __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
__m256 vp0 = vc3;
__m256 vp1 = vc3;
__m256 vp2 = vc3;
__m256 vp3 = vc3;
__m256 vp4 = vc3;
__m256 vp5 = vc3;
__m256 vp6 = vc3;
__m256 vp7 = vc3;
__m256 vp8 = vc3;
__m256 vp9 = vc3;
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo);
vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo);
vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo);
vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo);
vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo);
vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo);
vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo);
vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo);
vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo);
vp9 = _mm256_fmadd_ps(vp9, vt9, vtwo);
const __m256 vts0 = _mm256_mul_ps(vt0, vs0);
const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one);
const __m256 vts1 = _mm256_mul_ps(vt1, vs1);
const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one);
const __m256 vts2 = _mm256_mul_ps(vt2, vs2);
const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one);
const __m256 vts3 = _mm256_mul_ps(vt3, vs3);
const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one);
const __m256 vts4 = _mm256_mul_ps(vt4, vs4);
const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one);
const __m256 vts5 = _mm256_mul_ps(vt5, vs5);
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one);
const __m256 vts6 = _mm256_mul_ps(vt6, vs6);
const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one);
const __m256 vts7 = _mm256_mul_ps(vt7, vs7);
const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one);
const __m256 vts8 = _mm256_mul_ps(vt8, vs8);
const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one);
const __m256 vts9 = _mm256_mul_ps(vt9, vs9);
const __m256 vsmo9 = _mm256_add_ps(vs9, vminus_one);
const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0);
const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1);
const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2);
const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3);
const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4);
const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5);
const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6);
const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7);
const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8);
const __m256 vemo9 = _mm256_fmadd_ps(vp9, vts9, vsmo9);
const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo);
const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo);
const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo);
const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo);
const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo);
const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo);
const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo);
const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo);
const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo);
const __m256 vepo9 = _mm256_add_ps(vemo9, vtwo);
__m256 vrepo0 = _mm256_rcp_ps(vepo0);
__m256 vrepo1 = _mm256_rcp_ps(vepo1);
__m256 vrepo2 = _mm256_rcp_ps(vepo2);
__m256 vrepo3 = _mm256_rcp_ps(vepo3);
__m256 vrepo4 = _mm256_rcp_ps(vepo4);
__m256 vrepo5 = _mm256_rcp_ps(vepo5);
__m256 vrepo6 = _mm256_rcp_ps(vepo6);
__m256 vrepo7 = _mm256_rcp_ps(vepo7);
__m256 vrepo8 = _mm256_rcp_ps(vepo8);
__m256 vrepo9 = _mm256_rcp_ps(vepo9);
__m256 vy0 = _mm256_mul_ps(vemo0, vrepo0);
__m256 vy1 = _mm256_mul_ps(vemo1, vrepo1);
__m256 vy2 = _mm256_mul_ps(vemo2, vrepo2);
__m256 vy3 = _mm256_mul_ps(vemo3, vrepo3);
__m256 vy4 = _mm256_mul_ps(vemo4, vrepo4);
__m256 vy5 = _mm256_mul_ps(vemo5, vrepo5);
__m256 vy6 = _mm256_mul_ps(vemo6, vrepo6);
__m256 vy7 = _mm256_mul_ps(vemo7, vrepo7);
__m256 vy8 = _mm256_mul_ps(vemo8, vrepo8);
__m256 vy9 = _mm256_mul_ps(vemo9, vrepo9);
vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0);
vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1);
vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2);
vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3);
vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4);
vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5);
vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6);
vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7);
vy8 = _mm256_blendv_ps(vy8, vminus_one, vm8);
vy9 = _mm256_blendv_ps(vy9, vminus_one, vm9);
__m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT);
__m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT);
__m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT);
__m128i vh8 = _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT);
__m128i vh9 = _mm256_cvtps_ph(vy9, _MM_FROUND_TO_NEAREST_INT);
vh0 = _mm_xor_si128(vh0, vinvsignx0);
vh1 = _mm_xor_si128(vh1, vinvsignx1);
vh2 = _mm_xor_si128(vh2, vinvsignx2);
vh3 = _mm_xor_si128(vh3, vinvsignx3);
vh4 = _mm_xor_si128(vh4, vinvsignx4);
vh5 = _mm_xor_si128(vh5, vinvsignx5);
vh6 = _mm_xor_si128(vh6, vinvsignx6);
vh7 = _mm_xor_si128(vh7, vinvsignx7);
vh8 = _mm_xor_si128(vh8, vinvsignx8);
vh9 = _mm_xor_si128(vh9, vinvsignx9);
_mm_storeu_si128((__m128i*) o, vh0);
_mm_storeu_si128((__m128i*) (o + 8), vh1);
_mm_storeu_si128((__m128i*) (o + 16), vh2);
_mm_storeu_si128((__m128i*) (o + 24), vh3);
_mm_storeu_si128((__m128i*) (o + 32), vh4);
_mm_storeu_si128((__m128i*) (o + 40), vh5);
_mm_storeu_si128((__m128i*) (o + 48), vh6);
_mm_storeu_si128((__m128i*) (o + 56), vh7);
_mm_storeu_si128((__m128i*) (o + 64), vh8);
_mm_storeu_si128((__m128i*) (o + 72), vh9);
o += 80;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
_mm_storeu_si128((__m128i*) o, vh);
o += 8;
}
if (batch != 0) {
const __m128i vx = _mm_loadu_si128((const __m128i*) i);
const __m128i vabsx = _mm_or_si128(vx, vsign_mask);
__m256 vz = _mm256_cvtph_ps(vabsx);
const __m128i vinvsignx = _mm_xor_si128(vx, vabsx);
const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = vc3;
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vtwo);
const __m256 vts = _mm256_mul_ps(vt, vs);
const __m256 vsmo = _mm256_add_ps(vs, vminus_one);
const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo);
const __m256 vepo = _mm256_add_ps(vemo, vtwo);
__m256 vrepo = _mm256_rcp_ps(vepo);
__m256 vy = _mm256_mul_ps(vemo, vrepo);
vy = _mm256_blendv_ps(vy, vminus_one, vm);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
vh = _mm_xor_si128(vh, vinvsignx);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| file_length: 16,220 | avg_line_length: 42.604839 | max_line_length: 104 | extension_type: c |