repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
XNNPACK | XNNPACK-master/src/f16-maxpool/f16-maxpool-9p8x-minmax-f16c-c8.c |
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/maxpool.h>
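// F16 max-pooling microkernel (AVX/F16C): half-precision inputs are widened to
// f32 with _mm256_cvtph_ps, reduced with _mm256_max_ps, clamped, and narrowed
// back with _mm256_cvtps_ph. The pooling window is consumed 9 elements in the
// first pass and 8 per subsequent pass ("9p8x"), 8 channels at a time ("c8").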
void xnn_f16_maxpool_minmax_ukernel_9p8x__f16c_c8(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const void** input,
size_t input_offset,
void* output,
size_t input_increment,
size_t output_increment,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(channels != 0);
const __m256 voutput_min = _mm256_load_ps(params->avx.min);
const __m256 voutput_max = _mm256_load_ps(params->avx.max);
do {
uint16_t* o = output;
{
const uint16_t* i0 = *input++;
const uint16_t* i1 = *input++;
const uint16_t* i2 = *input++;
const uint16_t* i3 = *input++;
const uint16_t* i4 = *input++;
const uint16_t* i5 = *input++;
const uint16_t* i6 = *input++;
const uint16_t* i7 = *input++;
const uint16_t* i8 = *input++;
i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset);
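      // Windows smaller than 9 elements: alias the unused input pointers to i0,
      // so the corresponding max operations become harmless no-ops.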
if (kernel_elements < 2) {
i1 = i0;
}
if (kernel_elements <= 2) {
i2 = i0;
}
if (kernel_elements < 4) {
i3 = i0;
}
if (kernel_elements <= 4) {
i4 = i0;
}
if (kernel_elements < 6) {
i5 = i0;
}
if (kernel_elements <= 6) {
i6 = i0;
}
if (kernel_elements < 8) {
i7 = i0;
}
if (kernel_elements <= 8) {
i8 = i0;
}
size_t c = channels;
for (; c >= 8; c -= 8) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
i2 += 8;
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
i3 += 8;
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
i4 += 8;
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
i5 += 8;
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
i6 += 8;
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
i7 += 8;
const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i8));
i8 += 8;
const __m256 vmax018 = _mm256_max_ps(_mm256_max_ps(vi0, vi1), vi8);
const __m256 vmax23 = _mm256_max_ps(vi2, vi3);
const __m256 vmax45 = _mm256_max_ps(vi4, vi5);
const __m256 vmax67 = _mm256_max_ps(vi6, vi7);
const __m256 vmax2345 = _mm256_max_ps(vmax23, vmax45);
const __m256 vmax01678 = _mm256_max_ps(vmax018, vmax67);
const __m256 vmax = _mm256_max_ps(vmax2345, vmax01678);
const __m256 vout = _mm256_max_ps(_mm256_min_ps(vmax, voutput_max), voutput_min);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
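      // Remaining 1-7 channels: compute a full 8-wide result (out-of-bounds reads
      // are tolerated, see XNN_OOB_READS) and store it with 4/2/1-element tails.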
if (c != 0) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
i2 += 8;
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
i3 += 8;
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
i4 += 8;
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
i5 += 8;
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
i6 += 8;
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
i7 += 8;
const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i8));
i8 += 8;
const __m256 vmax018 = _mm256_max_ps(_mm256_max_ps(vi0, vi1), vi8);
const __m256 vmax23 = _mm256_max_ps(vi2, vi3);
const __m256 vmax45 = _mm256_max_ps(vi4, vi5);
const __m256 vmax67 = _mm256_max_ps(vi6, vi7);
const __m256 vmax2345 = _mm256_max_ps(vmax23, vmax45);
const __m256 vmax01678 = _mm256_max_ps(vmax018, vmax67);
const __m256 vmax = _mm256_max_ps(vmax2345, vmax01678);
__m256 vout = _mm256_max_ps(_mm256_min_ps(vmax, voutput_max), voutput_min);
__m128i vh = _mm256_cvtps_ph(vout, _MM_FROUND_TO_NEAREST_INT);
if (c & 4) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (c & 2) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (c & 1) {
*o = _mm_extract_epi16(vh, 0);
o += 1;
}
}
}
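    // Subsequent passes: fold up to 8 more window elements into the partial
    // maxima already written to this output row.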
for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
const uint16_t* i0 = *input++;
const uint16_t* i1 = *input++;
const uint16_t* i2 = *input++;
const uint16_t* i3 = *input++;
const uint16_t* i4 = *input++;
const uint16_t* i5 = *input++;
const uint16_t* i6 = *input++;
const uint16_t* i7 = *input++;
i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
if (k < 2) {
i1 = i0;
}
if (k <= 2) {
i2 = i0;
}
if (k < 4) {
i3 = i0;
}
if (k <= 4) {
i4 = i0;
}
if (k < 6) {
i5 = i0;
}
if (k <= 6) {
i6 = i0;
}
if (k < 8) {
i7 = i0;
}
o = output;
size_t c = channels;
for (; c >= 8; c -= 8) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
i2 += 8;
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
i3 += 8;
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
i4 += 8;
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
i5 += 8;
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
i6 += 8;
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
i7 += 8;
const __m256 vo = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) o));
const __m256 vmax01 = _mm256_max_ps(_mm256_max_ps(vi0, vi1), vo);
const __m256 vmax23 = _mm256_max_ps(vi2, vi3);
const __m256 vmax45 = _mm256_max_ps(vi4, vi5);
const __m256 vmax67 = _mm256_max_ps(vi6, vi7);
const __m256 vmax2345 = _mm256_max_ps(vmax23, vmax45);
const __m256 vmax0167 = _mm256_max_ps(vmax01, vmax67);
const __m256 vmax = _mm256_max_ps(vmax2345, vmax0167);
const __m256 vout = _mm256_max_ps(_mm256_min_ps(vmax, voutput_max), voutput_min);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if (c != 0) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
const __m256 vo = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) o));
const __m256 vmax01 = _mm256_max_ps(_mm256_max_ps(vi0, vi1), vo);
const __m256 vmax23 = _mm256_max_ps(vi2, vi3);
const __m256 vmax45 = _mm256_max_ps(vi4, vi5);
const __m256 vmax67 = _mm256_max_ps(vi6, vi7);
const __m256 vmax2345 = _mm256_max_ps(vmax23, vmax45);
const __m256 vmax0167 = _mm256_max_ps(vmax01, vmax67);
const __m256 vmax = _mm256_max_ps(vmax2345, vmax0167);
__m256 vout = _mm256_max_ps(_mm256_min_ps(vmax, voutput_max), voutput_min);
__m128i vh = _mm256_cvtps_ph(vout, _MM_FROUND_TO_NEAREST_INT);
if (c & 4) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (c & 2) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (c & 1) {
*o = _mm_extract_epi16(vh, 0);
o += 1;
}
}
}
input = (const void**) ((uintptr_t) input + input_increment);
output = (uint16_t*) ((uintptr_t) o + output_increment);
} while (--output_pixels != 0);
}
| 10,320 | 36.667883 | 89 | c |
XNNPACK | XNNPACK-master/src/f16-maxpool/f16-maxpool-9p8x-minmax-neonfp16arith-c8.c |
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/maxpool.h>
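// NEON FP16-arithmetic variant of the 9p8x/c8 f16 max-pooling microkernel:
// values stay in half precision throughout, loaded as uint16_t and
// reinterpreted as float16x8_t.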
void xnn_f16_maxpool_minmax_ukernel_9p8x__neonfp16arith_c8(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const void** input,
size_t input_offset,
void* output,
size_t input_increment,
size_t output_increment,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(channels != 0);
  const float16x8_t voutput_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t voutput_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
do {
uint16_t* o = output;
{
const uint16_t* i0 = *input++;
const uint16_t* i1 = *input++;
const uint16_t* i2 = *input++;
const uint16_t* i3 = *input++;
const uint16_t* i4 = *input++;
const uint16_t* i5 = *input++;
const uint16_t* i6 = *input++;
const uint16_t* i7 = *input++;
const uint16_t* i8 = *input++;
i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset);
if (kernel_elements < 2) {
i1 = i0;
}
if (kernel_elements <= 2) {
i2 = i0;
}
if (kernel_elements < 4) {
i3 = i0;
}
if (kernel_elements <= 4) {
i4 = i0;
}
if (kernel_elements < 6) {
i5 = i0;
}
if (kernel_elements <= 6) {
i6 = i0;
}
if (kernel_elements < 8) {
i7 = i0;
}
if (kernel_elements <= 8) {
i8 = i0;
}
size_t c = channels;
for (; c >= 8; c -= 8) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8;
const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8;
const float16x8_t vmax018 = vmaxq_f16(vmaxq_f16(vi0, vi1), vi8);
const float16x8_t vmax23 = vmaxq_f16(vi2, vi3);
const float16x8_t vmax45 = vmaxq_f16(vi4, vi5);
const float16x8_t vmax67 = vmaxq_f16(vi6, vi7);
const float16x8_t vmax2345 = vmaxq_f16(vmax23, vmax45);
const float16x8_t vmax01678 = vmaxq_f16(vmax018, vmax67);
const float16x8_t vmax = vmaxq_f16(vmax2345, vmax01678);
const float16x8_t vout = vmaxq_f16(vminq_f16(vmax, voutput_max), voutput_min);
vst1q_u16(o, vreinterpretq_u16_f16(vout)); o += 8;
}
if (c != 0) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8;
const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8;
const float16x8_t vmax018 = vmaxq_f16(vmaxq_f16(vi0, vi1), vi8);
const float16x8_t vmax23 = vmaxq_f16(vi2, vi3);
const float16x8_t vmax45 = vmaxq_f16(vi4, vi5);
const float16x8_t vmax67 = vmaxq_f16(vi6, vi7);
const float16x8_t vmax2345 = vmaxq_f16(vmax23, vmax45);
const float16x8_t vmax01678 = vmaxq_f16(vmax018, vmax67);
const float16x8_t vmax = vmaxq_f16(vmax2345, vmax01678);
float16x8_t vout = vmaxq_f16(vminq_f16(vmax, voutput_max), voutput_min);
float16x4_t vout_lo = vget_low_f16(vout);
if (c & 4) {
vst1_u16(o, vreinterpret_u16_f16(vout_lo)); o += 4;
vout_lo = vget_high_f16(vout);
}
if (c & 2) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vout_lo), 0); o += 2;
vout_lo = vext_f16(vout_lo, vout_lo, 2);
}
if (c & 1) {
vst1_lane_u16(o, vreinterpret_u16_f16(vout_lo), 0); o += 1;
}
}
}
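    // Subsequent passes reload the partial maxima from the output row (vo)
    // and combine them with the next block of up to 8 window elements.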
for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
const uint16_t* i0 = *input++;
const uint16_t* i1 = *input++;
const uint16_t* i2 = *input++;
const uint16_t* i3 = *input++;
const uint16_t* i4 = *input++;
const uint16_t* i5 = *input++;
const uint16_t* i6 = *input++;
const uint16_t* i7 = *input++;
i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
if (k < 2) {
i1 = i0;
}
if (k <= 2) {
i2 = i0;
}
if (k < 4) {
i3 = i0;
}
if (k <= 4) {
i4 = i0;
}
if (k < 6) {
i5 = i0;
}
if (k <= 6) {
i6 = i0;
}
if (k < 8) {
i7 = i0;
}
o = output;
size_t c = channels;
for (; c >= 8; c -= 8) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8;
const float16x8_t vo = vreinterpretq_f16_u16(vld1q_u16(o));
const float16x8_t vmax01 = vmaxq_f16(vmaxq_f16(vi0, vi1), vo);
const float16x8_t vmax23 = vmaxq_f16(vi2, vi3);
const float16x8_t vmax45 = vmaxq_f16(vi4, vi5);
const float16x8_t vmax67 = vmaxq_f16(vi6, vi7);
const float16x8_t vmax2345 = vmaxq_f16(vmax23, vmax45);
const float16x8_t vmax0167 = vmaxq_f16(vmax01, vmax67);
const float16x8_t vmax = vmaxq_f16(vmax2345, vmax0167);
const float16x8_t vout = vmaxq_f16(vminq_f16(vmax, voutput_max), voutput_min);
vst1q_u16(o, vreinterpretq_u16_f16(vout)); o += 8;
}
if (c != 0) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0));
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1));
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2));
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3));
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4));
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5));
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6));
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7));
const float16x8_t vo = vreinterpretq_f16_u16(vld1q_u16(o));
const float16x8_t vmax01 = vmaxq_f16(vmaxq_f16(vi0, vi1), vo);
const float16x8_t vmax23 = vmaxq_f16(vi2, vi3);
const float16x8_t vmax45 = vmaxq_f16(vi4, vi5);
const float16x8_t vmax67 = vmaxq_f16(vi6, vi7);
const float16x8_t vmax2345 = vmaxq_f16(vmax23, vmax45);
const float16x8_t vmax0167 = vmaxq_f16(vmax01, vmax67);
const float16x8_t vmax = vmaxq_f16(vmax2345, vmax0167);
float16x8_t vout = vmaxq_f16(vminq_f16(vmax, voutput_max), voutput_min);
float16x4_t vout_lo = vget_low_f16(vout);
if (c & 4) {
vst1_u16(o, vreinterpret_u16_f16(vout_lo)); o += 4;
vout_lo = vget_high_f16(vout);
}
if (c & 2) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vout_lo), 0); o += 2;
vout_lo = vext_f16(vout_lo, vout_lo, 2);
}
if (c & 1) {
vst1_lane_u16(o, vreinterpret_u16_f16(vout_lo), 0); o += 1;
}
}
}
input = (const void**) ((uintptr_t) input + input_increment);
output = (uint16_t*) ((uintptr_t) o + output_increment);
} while (--output_pixels != 0);
}
| 9,669 | 39.460251 | 95 | c |
XNNPACK | XNNPACK-master/src/f16-pavgpool/f16-pavgpool-9p8x-minmax-avx2-c8.c |
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/pavgpool.h>
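// F16 pixel-average pooling ("pavgpool", AVX2/F16C), multi-pass variant for
// windows larger than 9 elements: partial sums are accumulated in the
// caller-provided buffer and re-rounded to half precision after every add;
// the final pass scales each sum by a per-output-pixel multiplier and clamps.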
void xnn_f16_pavgpool_minmax_ukernel_9p8x__avx2_c8(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const void** input,
size_t input_offset,
const void* zero,
const void* multiplier,
void* buffer,
void* output,
size_t input_increment,
size_t output_increment,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const __m256 voutput_min = _mm256_load_ps(params->avx.min);
const __m256 voutput_max = _mm256_load_ps(params->avx.max);
uint16_t* o = (uint16_t*) output;
do {
{
const uint16_t* i0 = (const uint16_t*) *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
}
const uint16_t* i1 = (const uint16_t*) *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
}
const uint16_t* i2 = (const uint16_t*) *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
}
const uint16_t* i3 = (const uint16_t*) *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
}
const uint16_t* i4 = (const uint16_t*) *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
}
const uint16_t* i5 = (const uint16_t*) *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
}
const uint16_t* i6 = (const uint16_t*) *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
}
const uint16_t* i7 = (const uint16_t*) *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
}
const uint16_t* i8 = (const uint16_t*) *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset);
}
uint16_t* b = (uint16_t*) buffer;
for (size_t c = 0; c < channels; c += 8) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
i2 += 8;
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
i3 += 8;
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
i4 += 8;
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
i5 += 8;
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
i6 += 8;
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
i7 += 8;
const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i8));
i8 += 8;
const __m256 vsum01 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi0, vi1), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum23 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi2, vi3), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum45 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi4, vi5), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum018 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01, vi8), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum2345 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum23, vsum45), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum01678 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum018, vsum67), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum2345, vsum01678), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) b, _mm256_cvtps_ph(vsum, _MM_FROUND_TO_NEAREST_INT));
b += 8;
}
}
size_t k = kernel_elements;
for (k -= 9; k > 8; k -= 8) {
const uint16_t* i0 = (const uint16_t*) *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
}
const uint16_t* i1 = (const uint16_t*) *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
}
const uint16_t* i2 = (const uint16_t*) *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
}
const uint16_t* i3 = (const uint16_t*) *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
}
const uint16_t* i4 = (const uint16_t*) *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
}
const uint16_t* i5 = (const uint16_t*) *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
}
const uint16_t* i6 = (const uint16_t*) *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
}
const uint16_t* i7 = (const uint16_t*) *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
}
uint16_t* b = (uint16_t*) buffer;
for (size_t c = 0; c < channels; c += 8) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
i2 += 8;
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
i3 += 8;
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
i4 += 8;
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
i5 += 8;
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
i6 += 8;
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
i7 += 8;
const __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
const __m256 vsum01 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi0, vi1), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum23 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi2, vi3), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum45 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi4, vi5), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum01a = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01, vacc), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum2345 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum23, vsum45), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum0167a = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01a, vsum67), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum2345, vsum0167a), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) b, _mm256_cvtps_ph(vsum, _MM_FROUND_TO_NEAREST_INT));
b += 8;
}
}
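    // Final pass: add the last 1-8 window elements to the buffered partial sums,
    // scale by this pixel's multiplier, clamp to [output_min, output_max], and store.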
{
const uint16_t* i0 = (const uint16_t*) input[0];
assert(i0 != NULL);
const uint16_t* i1 = (const uint16_t*) input[1];
const uint16_t* i2 = (const uint16_t*) input[2];
const uint16_t* i3 = (const uint16_t*) input[3];
const uint16_t* i4 = (const uint16_t*) input[4];
const uint16_t* i5 = (const uint16_t*) input[5];
const uint16_t* i6 = (const uint16_t*) input[6];
const uint16_t* i7 = (const uint16_t*) input[7];
input = (const void**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = (const uint16_t*) zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = (const uint16_t*) zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = (const uint16_t*) zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = (const uint16_t*) zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = (const uint16_t*) zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = (const uint16_t*) zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = (const uint16_t*) zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
}
const __m256 vmultiplier = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) multiplier)));
multiplier = (const uint16_t*) multiplier + 1;
size_t c = channels;
const uint16_t* b = (const uint16_t*) buffer;
while (c >= 8) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
i2 += 8;
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
i3 += 8;
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
i4 += 8;
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
i5 += 8;
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
i6 += 8;
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
i7 += 8;
const __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
b += 8;
const __m256 vsum01 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi0, vi1), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum23 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi2, vi3), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum45 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi4, vi5), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum01a = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01, vacc), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum2345 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum23, vsum45), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum0167a = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01a, vsum67), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum2345, vsum0167a), _MM_FROUND_TO_NEAREST_INT));
__m256 vout = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vsum, vmultiplier), _MM_FROUND_TO_NEAREST_INT));
vout = _mm256_max_ps(vout, voutput_min);
vout = _mm256_min_ps(vout, voutput_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout, _MM_FROUND_TO_NEAREST_INT));
o += 8;
c -= 8;
}
if (c != 0) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
const __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
const __m256 vsum01 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi0, vi1), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum23 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi2, vi3), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum45 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi4, vi5), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum01a = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01, vacc), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum2345 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum23, vsum45), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum0167a = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01a, vsum67), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum2345, vsum0167a), _MM_FROUND_TO_NEAREST_INT));
__m256 vout = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vsum, vmultiplier), _MM_FROUND_TO_NEAREST_INT));
vout = _mm256_max_ps(vout, voutput_min);
vout = _mm256_min_ps(vout, voutput_max);
__m128i vh = _mm256_cvtps_ph(vout, _MM_FROUND_TO_NEAREST_INT);
if (c & 4) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (c & 2) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (c & 1) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
o += 1;
}
}
}
o = (uint16_t*) ((uintptr_t) o + output_increment);
} while (--output_pixels != 0);
}
| 15,443 | 43.507205 | 125 | c |
XNNPACK | XNNPACK-master/src/f16-pavgpool/f16-pavgpool-9p8x-minmax-neonfp16arith-c8.c |
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/pavgpool.h>
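// NEON FP16 variant of the multi-pass pavgpool microkernel: partial sums are
// kept in half precision in the caller-provided buffer between passes.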
void xnn_f16_pavgpool_minmax_ukernel_9p8x__neonfp16arith_c8(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const void** input,
size_t input_offset,
const void* zero,
const void* multiplier,
void* buffer,
void* output,
size_t input_increment,
size_t output_increment,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
  const float16x8_t voutput_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t voutput_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
do {
{
const uint16_t* i0 = (const uint16_t*) *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
}
const uint16_t* i1 = (const uint16_t*) *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
}
const uint16_t* i2 = (const uint16_t*) *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
}
const uint16_t* i3 = (const uint16_t*) *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
}
const uint16_t* i4 = (const uint16_t*) *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
}
const uint16_t* i5 = (const uint16_t*) *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
}
const uint16_t* i6 = (const uint16_t*) *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
}
const uint16_t* i7 = (const uint16_t*) *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
}
const uint16_t* i8 = (const uint16_t*) *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset);
}
uint16_t* b = (uint16_t*) buffer;
for (size_t c = 0; c < channels; c += 8) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8;
const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8;
const float16x8_t vsum01 = vaddq_f16(vi0, vi1);
const float16x8_t vsum23 = vaddq_f16(vi2, vi3);
const float16x8_t vsum45 = vaddq_f16(vi4, vi5);
const float16x8_t vsum67 = vaddq_f16(vi6, vi7);
const float16x8_t vsum018 = vaddq_f16(vsum01, vi8);
const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45);
const float16x8_t vsum01678 = vaddq_f16(vsum018, vsum67);
const float16x8_t vsum = vaddq_f16(vsum2345, vsum01678);
vst1q_u16(b, vreinterpretq_u16_f16(vsum)); b += 8;
}
}
size_t k = kernel_elements;
for (k -= 9; k > 8; k -= 8) {
const uint16_t* i0 = (const uint16_t*) *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
}
const uint16_t* i1 = (const uint16_t*) *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
}
const uint16_t* i2 = (const uint16_t*) *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
}
const uint16_t* i3 = (const uint16_t*) *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
}
const uint16_t* i4 = (const uint16_t*) *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
}
const uint16_t* i5 = (const uint16_t*) *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
}
const uint16_t* i6 = (const uint16_t*) *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
}
const uint16_t* i7 = (const uint16_t*) *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
}
uint16_t* b = (uint16_t*) buffer;
for (size_t c = 0; c < channels; c += 8) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8;
const float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(b));
const float16x8_t vsum01 = vaddq_f16(vi0, vi1);
const float16x8_t vsum23 = vaddq_f16(vi2, vi3);
const float16x8_t vsum45 = vaddq_f16(vi4, vi5);
const float16x8_t vsum67 = vaddq_f16(vi6, vi7);
const float16x8_t vsum01a = vaddq_f16(vsum01, vacc);
const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45);
const float16x8_t vsum0167a = vaddq_f16(vsum01a, vsum67);
const float16x8_t vsum = vaddq_f16(vsum2345, vsum0167a);
vst1q_u16(b, vreinterpretq_u16_f16(vsum)); b += 8;
}
}
{
const uint16_t* i0 = (const uint16_t*) input[0];
assert(i0 != NULL);
const uint16_t* i1 = (const uint16_t*) input[1];
const uint16_t* i2 = (const uint16_t*) input[2];
const uint16_t* i3 = (const uint16_t*) input[3];
const uint16_t* i4 = (const uint16_t*) input[4];
const uint16_t* i5 = (const uint16_t*) input[5];
const uint16_t* i6 = (const uint16_t*) input[6];
const uint16_t* i7 = (const uint16_t*) input[7];
input = (const void**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = (const uint16_t*) zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = (const uint16_t*) zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = (const uint16_t*) zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = (const uint16_t*) zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = (const uint16_t*) zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = (const uint16_t*) zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = (const uint16_t*) zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
}
const float16x8_t vmultiplier = vreinterpretq_f16_u16(vld1q_dup_u16(multiplier)); multiplier = (const uint16_t*) multiplier + 1;
size_t c = channels;
const uint16_t* b = (const uint16_t*) buffer;
while (c >= 8) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8;
const float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
const float16x8_t vsum01 = vaddq_f16(vi0, vi1);
const float16x8_t vsum23 = vaddq_f16(vi2, vi3);
const float16x8_t vsum45 = vaddq_f16(vi4, vi5);
const float16x8_t vsum67 = vaddq_f16(vi6, vi7);
const float16x8_t vsum01a = vaddq_f16(vsum01, vacc);
const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45);
const float16x8_t vsum0167a = vaddq_f16(vsum01a, vsum67);
const float16x8_t vsum = vaddq_f16(vsum2345, vsum0167a);
float16x8_t vout = vmulq_f16(vsum, vmultiplier);
vout = vmaxq_f16(vout, voutput_min);
vout = vminq_f16(vout, voutput_max);
vst1q_u16(output, vreinterpretq_u16_f16(vout)); output = (uint16_t*) output + 8;
c -= 8;
}
if (c != 0) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0));
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1));
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2));
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3));
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4));
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5));
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6));
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7));
const float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(b));
const float16x8_t vsum01 = vaddq_f16(vi0, vi1);
const float16x8_t vsum23 = vaddq_f16(vi2, vi3);
const float16x8_t vsum45 = vaddq_f16(vi4, vi5);
const float16x8_t vsum67 = vaddq_f16(vi6, vi7);
const float16x8_t vsum01a = vaddq_f16(vsum01, vacc);
const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45);
const float16x8_t vsum0167a = vaddq_f16(vsum01a, vsum67);
const float16x8_t vsum = vaddq_f16(vsum2345, vsum0167a);
float16x8_t vout = vmulq_f16(vsum, vmultiplier);
vout = vmaxq_f16(vout, voutput_min);
vout = vminq_f16(vout, voutput_max);
float16x4_t vout_lo = vget_low_f16(vout);
if (c & 4) {
vst1_u16(output, vreinterpret_u16_f16(vout_lo)); output = (uint16_t*) output + 4;
vout_lo = vget_high_f16(vout);
}
if (c & 2) {
vst1_lane_u32(output, vreinterpret_u32_f16(vout_lo), 0); output = (uint16_t*) output + 2;
vout_lo = vext_f16(vout_lo, vout_lo, 2);
}
if (c & 1) {
vst1_lane_u16(output, vreinterpret_u16_f16(vout_lo), 0); output = (uint16_t*) output + 1;
}
}
}
output = (uint16_t*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 12,715 | 39.75641 | 134 | c |
XNNPACK | XNNPACK-master/src/f16-pavgpool/f16-pavgpool-9x-minmax-avx2-c8.c |
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/pavgpool.h>
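// Single-pass F16 pixel-average pooling (AVX2/F16C) for windows of at most 9
// elements: no intermediate buffer is needed; each output is the sum of up to
// 9 inputs, scaled by a per-output-pixel multiplier and clamped.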
void xnn_f16_pavgpool_minmax_ukernel_9x__avx2_c8(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const void** input,
size_t input_offset,
const void* zero,
const void* multiplier,
void* output,
size_t input_increment,
size_t output_increment,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(kernel_elements <= 9);
assert(channels != 0);
const __m256 voutput_min = _mm256_load_ps(params->avx.min);
const __m256 voutput_max = _mm256_load_ps(params->avx.max);
uint16_t* o = (uint16_t*) output;
do {
const uint16_t* i0 = (const uint16_t*) input[0];
assert(i0 != NULL);
const uint16_t* i1 = (const uint16_t*) input[1];
const uint16_t* i2 = (const uint16_t*) input[2];
const uint16_t* i3 = (const uint16_t*) input[3];
const uint16_t* i4 = (const uint16_t*) input[4];
const uint16_t* i5 = (const uint16_t*) input[5];
const uint16_t* i6 = (const uint16_t*) input[6];
const uint16_t* i7 = (const uint16_t*) input[7];
const uint16_t* i8 = (const uint16_t*) input[8];
input = (const void**) ((uintptr_t) input + input_increment);
if (kernel_elements < 2) {
i1 = (const uint16_t*) zero;
}
assert(i1 != NULL);
if (kernel_elements <= 2) {
i2 = (const uint16_t*) zero;
}
assert(i2 != NULL);
if (kernel_elements < 4) {
i3 = (const uint16_t*) zero;
}
assert(i3 != NULL);
if (kernel_elements <= 4) {
i4 = (const uint16_t*) zero;
}
assert(i4 != NULL);
if (kernel_elements < 6) {
i5 = (const uint16_t*) zero;
}
assert(i5 != NULL);
if (kernel_elements <= 6) {
i6 = (const uint16_t*) zero;
}
assert(i6 != NULL);
if (kernel_elements < 8) {
i7 = (const uint16_t*) zero;
}
assert(i7 != NULL);
if (kernel_elements <= 8) {
i8 = (const uint16_t*) zero;
}
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
}
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset);
}
const __m256 vmultiplier = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) multiplier)));
multiplier = (const uint16_t*) multiplier + 1;
size_t c = channels;
while (c >= 8) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
i2 += 8;
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
i3 += 8;
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
i4 += 8;
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
i5 += 8;
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
i6 += 8;
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
i7 += 8;
const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i8));
i8 += 8;
const __m256 vsum01 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi0, vi1), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum23 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi2, vi3), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum45 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi4, vi5), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum018 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01, vi8), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum2345 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum23, vsum45), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum01678 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum018, vsum67), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum2345, vsum01678), _MM_FROUND_TO_NEAREST_INT));
__m256 vout = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vsum, vmultiplier), _MM_FROUND_TO_NEAREST_INT));
vout = _mm256_max_ps(vout, voutput_min);
vout = _mm256_min_ps(vout, voutput_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout, _MM_FROUND_TO_NEAREST_INT));
o += 8;
c -= 8;
}
if (c != 0) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i8));
const __m256 vsum01 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi0, vi1), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum23 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi2, vi3), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum45 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi4, vi5), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum018 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01, vi8), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum2345 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum23, vsum45), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum01678 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum018, vsum67), _MM_FROUND_TO_NEAREST_INT));
const __m256 vsum = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum2345, vsum01678), _MM_FROUND_TO_NEAREST_INT));
__m256 vout = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vsum, vmultiplier), _MM_FROUND_TO_NEAREST_INT));
vout = _mm256_max_ps(vout, voutput_min);
vout = _mm256_min_ps(vout, voutput_max);
__m128i vh = _mm256_cvtps_ph(vout, _MM_FROUND_TO_NEAREST_INT);
if (c & 4) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (c & 2) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (c & 1) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
o += 1;
}
}
o = (uint16_t*) ((uintptr_t) o + output_increment);
} while (--output_pixels != 0);
}
| 7,994 | 40.42487 | 123 | c |
XNNPACK | XNNPACK-master/src/f16-pavgpool/f16-pavgpool-9x-minmax-neonfp16arith-c8.c |
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/pavgpool.h>
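// Single-pass NEON FP16 pavgpool variant for windows of at most 9 elements.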
void xnn_f16_pavgpool_minmax_ukernel_9x__neonfp16arith_c8(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const void** input,
size_t input_offset,
const void* zero,
const void* multiplier,
void* output,
size_t input_increment,
size_t output_increment,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements != 0);
assert(kernel_elements <= 9);
assert(channels != 0);
  const float16x8_t voutput_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t voutput_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
do {
const uint16_t* i0 = (const uint16_t*) input[0];
assert(i0 != NULL);
const uint16_t* i1 = (const uint16_t*) input[1];
const uint16_t* i2 = (const uint16_t*) input[2];
const uint16_t* i3 = (const uint16_t*) input[3];
const uint16_t* i4 = (const uint16_t*) input[4];
const uint16_t* i5 = (const uint16_t*) input[5];
const uint16_t* i6 = (const uint16_t*) input[6];
const uint16_t* i7 = (const uint16_t*) input[7];
const uint16_t* i8 = (const uint16_t*) input[8];
input = (const void**) ((uintptr_t) input + input_increment);
if (kernel_elements < 2) {
i1 = (const uint16_t*) zero;
}
assert(i1 != NULL);
if (kernel_elements <= 2) {
i2 = (const uint16_t*) zero;
}
assert(i2 != NULL);
if (kernel_elements < 4) {
i3 = (const uint16_t*) zero;
}
assert(i3 != NULL);
if (kernel_elements <= 4) {
i4 = (const uint16_t*) zero;
}
assert(i4 != NULL);
if (kernel_elements < 6) {
i5 = (const uint16_t*) zero;
}
assert(i5 != NULL);
if (kernel_elements <= 6) {
i6 = (const uint16_t*) zero;
}
assert(i6 != NULL);
if (kernel_elements < 8) {
i7 = (const uint16_t*) zero;
}
assert(i7 != NULL);
if (kernel_elements <= 8) {
i8 = (const uint16_t*) zero;
}
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
}
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset);
}
const float16x8_t vmultiplier = vreinterpretq_f16_u16(vld1q_dup_u16(multiplier)); multiplier = (const uint16_t*) multiplier + 1;
size_t c = channels;
while (c >= 8) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8;
const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8;
const float16x8_t vsum01 = vaddq_f16(vi0, vi1);
const float16x8_t vsum23 = vaddq_f16(vi2, vi3);
const float16x8_t vsum45 = vaddq_f16(vi4, vi5);
const float16x8_t vsum67 = vaddq_f16(vi6, vi7);
const float16x8_t vsum018 = vaddq_f16(vsum01, vi8);
const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45);
const float16x8_t vsum01678 = vaddq_f16(vsum018, vsum67);
const float16x8_t vsum = vaddq_f16(vsum2345, vsum01678);
float16x8_t vout = vmulq_f16(vsum, vmultiplier);
vout = vmaxq_f16(vout, voutput_min);
vout = vminq_f16(vout, voutput_max);
vst1q_u16(output, vreinterpretq_u16_f16(vout)); output = (uint16_t*) output + 8;
c -= 8;
}
if (c != 0) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0));
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1));
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2));
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3));
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4));
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5));
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6));
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7));
const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i8));
const float16x8_t vsum01 = vaddq_f16(vi0, vi1);
const float16x8_t vsum23 = vaddq_f16(vi2, vi3);
const float16x8_t vsum45 = vaddq_f16(vi4, vi5);
const float16x8_t vsum67 = vaddq_f16(vi6, vi7);
const float16x8_t vsum018 = vaddq_f16(vsum01, vi8);
const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45);
const float16x8_t vsum01678 = vaddq_f16(vsum018, vsum67);
const float16x8_t vsum = vaddq_f16(vsum2345, vsum01678);
float16x8_t vout = vmulq_f16(vsum, vmultiplier);
vout = vmaxq_f16(vout, voutput_min);
vout = vminq_f16(vout, voutput_max);
float16x4_t vout_lo = vget_low_f16(vout);
if (c & 4) {
vst1_u16(output, vreinterpret_u16_f16(vout_lo)); output = (uint16_t*) output + 4;
vout_lo = vget_high_f16(vout);
}
if (c & 2) {
vst1_lane_u32(output, vreinterpret_u32_f16(vout_lo), 0); output = (uint16_t*) output + 2;
vout_lo = vext_f16(vout_lo, vout_lo, 2);
}
if (c & 1) {
vst1_lane_u16(output, vreinterpret_u16_f16(vout_lo), 0); output = (uint16_t*) output + 1;
}
}
output = (void*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
| 6,696 | 36.836158 | 132 | c |
XNNPACK | XNNPACK-master/src/f16-prelu/gen/f16-prelu-f16c-2x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-prelu/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
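// F16 PReLU microkernel (AVX/F16C), 2 rows x 16 channels per step: the output
// is the input where it is non-negative and weight*input where it is negative,
// selected with _mm256_blendv_ps keyed on the input's sign bit.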
void xnn_f16_prelu_ukernel__f16c_2x16(
size_t rows,
size_t channels,
const void* restrict input,
size_t input_stride,
const void* restrict weights,
void* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(uint16_t) == 0);
const uint16_t* i0 = (const uint16_t*) input;
uint16_t* o0 = (uint16_t*) output;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const uint16_t* w = (const uint16_t*) weights;
size_t c = channels;
for (; c >= 16 * sizeof(uint16_t); c -= 16 * sizeof(uint16_t)) {
const __m256 vw01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
const __m256 vw89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 8)));
w += 16;
const __m256 vi0x001234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
const __m256 vi0x089ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 8)));
i0 += 16;
const __m256 vi1x001234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
const __m256 vi1x089ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 8)));
i1 += 16;
__m256 vacc0x001234567 = _mm256_mul_ps(vi0x001234567, vw01234567);
__m256 vacc0x089ABCDEF = _mm256_mul_ps(vi0x089ABCDEF, vw89ABCDEF);
__m256 vacc1x001234567 = _mm256_mul_ps(vi1x001234567, vw01234567);
__m256 vacc1x089ABCDEF = _mm256_mul_ps(vi1x089ABCDEF, vw89ABCDEF);
vacc0x001234567 = _mm256_blendv_ps(vi0x001234567, vacc0x001234567, vi0x001234567);
vacc0x089ABCDEF = _mm256_blendv_ps(vi0x089ABCDEF, vacc0x089ABCDEF, vi0x089ABCDEF);
vacc1x001234567 = _mm256_blendv_ps(vi1x001234567, vacc1x001234567, vi1x001234567);
vacc1x089ABCDEF = _mm256_blendv_ps(vi1x089ABCDEF, vacc1x089ABCDEF, vi1x089ABCDEF);
      _mm_storeu_si128((__m128i*) o0, _mm256_cvtps_ph(vacc0x001234567, _MM_FROUND_TO_NEAREST_INT));
      _mm_storeu_si128((__m128i*) (o0 + 8), _mm256_cvtps_ph(vacc0x089ABCDEF, _MM_FROUND_TO_NEAREST_INT));
o0 += 16;
      _mm_storeu_si128((__m128i*) o1, _mm256_cvtps_ph(vacc1x001234567, _MM_FROUND_TO_NEAREST_INT));
      _mm_storeu_si128((__m128i*) (o1 + 8), _mm256_cvtps_ph(vacc1x089ABCDEF, _MM_FROUND_TO_NEAREST_INT));
o1 += 16;
}
for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
const __m256 vw01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
w += 8;
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
__m256 vacc0x01234567 = _mm256_mul_ps(vi0x01234567, vw01234567);
__m256 vacc1x01234567 = _mm256_mul_ps(vi1x01234567, vw01234567);
vacc0x01234567 = _mm256_blendv_ps(vi0x01234567, vacc0x01234567, vi0x01234567);
vacc1x01234567 = _mm256_blendv_ps(vi1x01234567, vacc1x01234567, vi1x01234567);
_mm_storeu_si128((__m128i*) o0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
o0 += 8;
_mm_storeu_si128((__m128i*) o1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
o1 += 8;
}
if XNN_UNLIKELY(c != 0) {
const __m256 vw01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 = (const uint16_t*) ((uintptr_t) i0 + c);
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 = (const uint16_t*) ((uintptr_t) i1 + c);
__m256 vacc0x01234567 = _mm256_mul_ps(vi0x01234567, vw01234567);
__m256 vacc1x01234567 = _mm256_mul_ps(vi1x01234567, vw01234567);
vacc0x01234567 = _mm256_blendv_ps(vi0x01234567, vacc0x01234567, vi0x01234567);
vacc1x01234567 = _mm256_blendv_ps(vi1x01234567, vacc1x01234567, vi1x01234567);
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
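      // Write the last 1-7 halfwords from the packed f16 vectors: 4, then 2, then 1,
      // shifting the already-stored lanes out of the register after each step.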
if (c & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o0, vh0x01234567);
_mm_storel_epi64((__m128i*) o1, vh1x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
o0 += 4;
o1 += 4;
}
if (c & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o0, vh0x01234567);
_mm_storeu_si32(o1, vh1x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(uint16_t))) {
*o0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*o1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
o0 = (uint16_t*) ((uintptr_t) o0 + output_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
o1 = (uint16_t*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 6,166 | 40.113333 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-prelu/gen/f16-prelu-f16c-2x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-prelu/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f16_prelu_ukernel__f16c_2x8(
size_t rows,
size_t channels,
const void* restrict input,
size_t input_stride,
const void* restrict weights,
void* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(uint16_t) == 0);
const uint16_t* i0 = (const uint16_t*) input;
uint16_t* o0 = (uint16_t*) output;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
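    // When only one row remains, alias the second row's pointers to the first so the pair of
    // rows computed below reads and writes the same data instead of running past the input.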
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const uint16_t* w = (const uint16_t*) weights;
size_t c = channels;
for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
const __m256 vw01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
w += 8;
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
__m256 vacc0x01234567 = _mm256_mul_ps(vi0x01234567, vw01234567);
__m256 vacc1x01234567 = _mm256_mul_ps(vi1x01234567, vw01234567);
vacc0x01234567 = _mm256_blendv_ps(vi0x01234567, vacc0x01234567, vi0x01234567);
vacc1x01234567 = _mm256_blendv_ps(vi1x01234567, vacc1x01234567, vi1x01234567);
_mm_storeu_si128((__m128i*) o0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
o0 += 8;
_mm_storeu_si128((__m128i*) o1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
o1 += 8;
}
if XNN_UNLIKELY(c != 0) {
const __m256 vw01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 = (const uint16_t*) ((uintptr_t) i0 + c);
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 = (const uint16_t*) ((uintptr_t) i1 + c);
__m256 vacc0x01234567 = _mm256_mul_ps(vi0x01234567, vw01234567);
__m256 vacc1x01234567 = _mm256_mul_ps(vi1x01234567, vw01234567);
vacc0x01234567 = _mm256_blendv_ps(vi0x01234567, vacc0x01234567, vi0x01234567);
vacc1x01234567 = _mm256_blendv_ps(vi1x01234567, vacc1x01234567, vi1x01234567);
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
if (c & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o0, vh0x01234567);
_mm_storel_epi64((__m128i*) o1, vh1x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
o0 += 4;
o1 += 4;
}
if (c & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o0, vh0x01234567);
_mm_storeu_si32(o1, vh1x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(uint16_t))) {
*o0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*o1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
o0 = (uint16_t*) ((uintptr_t) o0 + output_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
o1 = (uint16_t*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 4,185 | 34.176471 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-prelu/gen/f16-prelu-neonfp16arith-2x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-prelu/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f16_prelu_ukernel__neonfp16arith_2x16(
size_t rows,
size_t channels,
const void* restrict input,
size_t input_stride,
const void* restrict weights,
void* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(uint16_t) == 0);
const uint16_t* i0 = (const uint16_t*) input;
uint16_t* o0 = (uint16_t*) output;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const uint16_t* w = (const uint16_t*) weights;
size_t c = channels;
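    // NEON form of the PReLU select: vcltq_s16 on the raw bits tests the f16 sign bit
    // (negative inputs), and vbslq_f16 then picks the weighted product for negative lanes
    // and the original input for the rest.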
for (; c >= 16 * sizeof(uint16_t); c -= 16 * sizeof(uint16_t)) {
const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
const float16x8_t vw89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
const float16x8_t vi0x001234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0x089ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x001234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi1x089ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
float16x8_t vacc0x001234567 = vmulq_f16(vi0x001234567, vw01234567);
const uint16x8_t vm0x001234567 = vcltq_s16(vreinterpretq_s16_f16(vi0x001234567), vmovq_n_s16(0));
float16x8_t vacc0x089ABCDEF = vmulq_f16(vi0x089ABCDEF, vw89ABCDEF);
const uint16x8_t vm0x089ABCDEF = vcltq_s16(vreinterpretq_s16_f16(vi0x089ABCDEF), vmovq_n_s16(0));
float16x8_t vacc1x001234567 = vmulq_f16(vi1x001234567, vw01234567);
const uint16x8_t vm1x001234567 = vcltq_s16(vreinterpretq_s16_f16(vi1x001234567), vmovq_n_s16(0));
float16x8_t vacc1x089ABCDEF = vmulq_f16(vi1x089ABCDEF, vw89ABCDEF);
const uint16x8_t vm1x089ABCDEF = vcltq_s16(vreinterpretq_s16_f16(vi1x089ABCDEF), vmovq_n_s16(0));
vacc0x001234567 = vbslq_f16(vm0x001234567, vacc0x001234567, vi0x001234567);
vacc0x089ABCDEF = vbslq_f16(vm0x089ABCDEF, vacc0x089ABCDEF, vi0x089ABCDEF);
vacc1x001234567 = vbslq_f16(vm1x001234567, vacc1x001234567, vi1x001234567);
vacc1x089ABCDEF = vbslq_f16(vm1x089ABCDEF, vacc1x089ABCDEF, vi1x089ABCDEF);
vst1q_u16(o0, vreinterpretq_u16_f16(vacc0x001234567)); o0 += 8;
vst1q_u16(o0, vreinterpretq_u16_f16(vacc0x089ABCDEF)); o0 += 8;
vst1q_u16(o1, vreinterpretq_u16_f16(vacc1x001234567)); o1 += 8;
vst1q_u16(o1, vreinterpretq_u16_f16(vacc1x089ABCDEF)); o1 += 8;
}
for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
i1 += 8;
float16x8_t vacc0x01234567 = vmulq_f16(vi0x01234567, vw01234567);
const uint16x8_t vm0x01234567 = vcltq_s16(vreinterpretq_s16_f16(vi0x01234567), vmovq_n_s16(0));
float16x8_t vacc1x01234567 = vmulq_f16(vi1x01234567, vw01234567);
const uint16x8_t vm1x01234567 = vcltq_s16(vreinterpretq_s16_f16(vi1x01234567), vmovq_n_s16(0));
vacc0x01234567 = vbslq_f16(vm0x01234567, vacc0x01234567, vi0x01234567);
vacc1x01234567 = vbslq_f16(vm1x01234567, vacc1x01234567, vi1x01234567);
vst1q_u16(o0, vreinterpretq_u16_f16(vacc0x01234567)); o0 += 8;
vst1q_u16(o1, vreinterpretq_u16_f16(vacc1x01234567)); o1 += 8;
}
if XNN_UNLIKELY(c != 0) {
const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w));
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
i0 = (const uint16_t*) ((uintptr_t) i0 + c);
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
i1 = (const uint16_t*) ((uintptr_t) i1 + c);
float16x8_t vacc0x01234567 = vmulq_f16(vi0x01234567, vw01234567);
const uint16x8_t vm0x01234567 = vcltq_s16(vreinterpretq_s16_f16(vi0x01234567), vmovq_n_s16(0));
float16x8_t vacc1x01234567 = vmulq_f16(vi1x01234567, vw01234567);
const uint16x8_t vm1x01234567 = vcltq_s16(vreinterpretq_s16_f16(vi1x01234567), vmovq_n_s16(0));
vacc0x01234567 = vbslq_f16(vm0x01234567, vacc0x01234567, vi0x01234567);
vacc1x01234567 = vbslq_f16(vm1x01234567, vacc1x01234567, vi1x01234567);
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
if (c & (4 * sizeof(uint16_t))) {
vst1_u16(o0, vreinterpret_u16_f16(vacc0x0123)); o0 += 4;
vst1_u16(o1, vreinterpret_u16_f16(vacc1x0123)); o1 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
}
if (c & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vacc0x0123), 0); o0 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
vst1_lane_u32((void*) o1, vreinterpret_u32_f16(vacc1x0123), 0); o1 += 2;
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
}
if (c & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o0, vreinterpret_u16_f16(vacc0x0123), 0); o0 += 1;
vst1_lane_u16(o1, vreinterpret_u16_f16(vacc1x0123), 0); o1 += 1;
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
o0 = (uint16_t*) ((uintptr_t) o0 + output_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
o1 = (uint16_t*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 6,211 | 44.343066 | 103 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-prelu/gen/f16-prelu-neonfp16arith-2x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-prelu/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>
void xnn_f16_prelu_ukernel__neonfp16arith_2x8(
size_t rows,
size_t channels,
const void* restrict input,
size_t input_stride,
const void* restrict weights,
void* restrict output,
size_t output_stride) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(uint16_t) == 0);
const uint16_t* i0 = (const uint16_t*) input;
uint16_t* o0 = (uint16_t*) output;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const uint16_t* w = (const uint16_t*) weights;
size_t c = channels;
for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
i1 += 8;
float16x8_t vacc0x01234567 = vmulq_f16(vi0x01234567, vw01234567);
const uint16x8_t vm0x01234567 = vcltq_s16(vreinterpretq_s16_f16(vi0x01234567), vmovq_n_s16(0));
float16x8_t vacc1x01234567 = vmulq_f16(vi1x01234567, vw01234567);
const uint16x8_t vm1x01234567 = vcltq_s16(vreinterpretq_s16_f16(vi1x01234567), vmovq_n_s16(0));
vacc0x01234567 = vbslq_f16(vm0x01234567, vacc0x01234567, vi0x01234567);
vacc1x01234567 = vbslq_f16(vm1x01234567, vacc1x01234567, vi1x01234567);
vst1q_u16(o0, vreinterpretq_u16_f16(vacc0x01234567)); o0 += 8;
vst1q_u16(o1, vreinterpretq_u16_f16(vacc1x01234567)); o1 += 8;
}
if XNN_UNLIKELY(c != 0) {
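      // Tail of 1-7 channels: load a full vector (the kernel is tagged XNN_OOB_READS),
      // compute all 8 lanes, and store only the channels that remain.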
const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w));
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
i0 = (const uint16_t*) ((uintptr_t) i0 + c);
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
i1 = (const uint16_t*) ((uintptr_t) i1 + c);
float16x8_t vacc0x01234567 = vmulq_f16(vi0x01234567, vw01234567);
const uint16x8_t vm0x01234567 = vcltq_s16(vreinterpretq_s16_f16(vi0x01234567), vmovq_n_s16(0));
float16x8_t vacc1x01234567 = vmulq_f16(vi1x01234567, vw01234567);
const uint16x8_t vm1x01234567 = vcltq_s16(vreinterpretq_s16_f16(vi1x01234567), vmovq_n_s16(0));
vacc0x01234567 = vbslq_f16(vm0x01234567, vacc0x01234567, vi0x01234567);
vacc1x01234567 = vbslq_f16(vm1x01234567, vacc1x01234567, vi1x01234567);
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
if (c & (4 * sizeof(uint16_t))) {
vst1_u16(o0, vreinterpret_u16_f16(vacc0x0123)); o0 += 4;
vst1_u16(o1, vreinterpret_u16_f16(vacc1x0123)); o1 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
}
if (c & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vacc0x0123), 0); o0 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
vst1_lane_u32((void*) o1, vreinterpret_u32_f16(vacc1x0123), 0); o1 += 2;
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
}
if (c & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o0, vreinterpret_u16_f16(vacc0x0123), 0); o0 += 1;
vst1_lane_u16(o1, vreinterpret_u16_f16(vacc1x0123), 0); o1 += 1;
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
o0 = (uint16_t*) ((uintptr_t) o0 + output_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
o1 = (uint16_t*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 4,299 | 38.449541 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x32-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x32_acc2(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
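  // exp(x) is evaluated as 2**n * poly(t): n = round(x * log2e) is extracted with the
  // magic-bias trick, s = 2**n is rebuilt by shifting n into the exponent field,
  // t = x - n*ln2 is the reduced argument, and a degree-2 polynomial gives
  // f = s + (t*s)*(c2*t + c1). Lanes whose input falls below vdenorm_cutoff are flushed to zero.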
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
i += 32;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
o += 32;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc1 = _mm256_add_ps(vacc1, vf3);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
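  // Reduce the 4-lane partial sum to a scalar and store it back as a single f16 value.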
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 7,201 | 38.355191 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x32-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x32_acc4(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
__m256 vacc3 = _mm256_setzero_ps();
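  // Four independent accumulators, one per unrolled block of 8, keep the additions off a
  // single dependency chain; they are folded together after the main loop.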
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
i += 32;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
o += 32;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc2 = _mm256_add_ps(vacc2, vf2);
vacc3 = _mm256_add_ps(vacc3, vf3);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc2 = _mm256_add_ps(vacc2, vacc3);
vacc0 = _mm256_add_ps(vacc0, vacc2);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 7,355 | 38.336898 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x32(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
i += 32;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
o += 32;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc0 = _mm256_add_ps(vacc0, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
}
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
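    // Partial tail: store 4/2/1 halfwords from vh and fold the same lanes of vf into the
    // running sum; the blend with mask 0xC adds only the two lanes that were actually stored.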
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 7,119 | 38.337017 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x40-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x40_acc2(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
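  // 40 elements per iteration, with two accumulators taking the five blocks in turn.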
for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
i += 40;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
o += 40;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc1 = _mm256_add_ps(vacc1, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 7,939 | 39.717949 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x40-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x40_acc5(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
__m256 vacc3 = _mm256_setzero_ps();
__m256 vacc4 = _mm256_setzero_ps();
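  // One accumulator per block of 8; all five are folded together once the 40-wide loop ends.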
for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
i += 40;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
o += 40;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc2 = _mm256_add_ps(vacc2, vf2);
vacc3 = _mm256_add_ps(vacc3, vf3);
vacc4 = _mm256_add_ps(vacc4, vf4);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc2 = _mm256_add_ps(vacc2, vacc3);
vacc0 = _mm256_add_ps(vacc0, vacc2);
vacc0 = _mm256_add_ps(vacc0, vacc4);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 8,170 | 39.651741 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x40(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
i += 40;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
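    // For inputs below the denormal cutoff the 2**n scale itself underflows, so those lanes
    // are flushed to exactly zero via the compare + andnot below.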
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
o += 40;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc0 = _mm256_add_ps(vacc0, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
}
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
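  // The upper 128-bit half of the accumulator has been folded into the lower half; the
  // remaining 1-7 elements are accumulated into vacc_lo.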
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
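    // Store and accumulate the 1-7 leftover elements in chunks of 4, 2 and 1, shifting the
    // already-consumed lanes out of vh and vf_lo as we go.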
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
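  // Horizontally reduce the four remaining lanes to a scalar and store the sum as f16.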
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 7,857 | 39.715026 | 105 | c |
| XNNPACK | XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x48-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x48_acc2(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
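  // Two independent accumulators: consecutive result vectors alternate between vacc0 and
  // vacc1 to shorten the add dependency chain; they are merged once after the main loop.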
for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
i += 48;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
o += 48;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc1 = _mm256_add_ps(vacc1, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc1 = _mm256_add_ps(vacc1, vf5);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 8,677 | 40.922705 | 105 | c |
| XNNPACK | XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x48-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x48_acc3(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
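  // Three-way accumulation: the six result vectors per iteration are spread round-robin
  // across vacc0..vacc2 and summed together after the main loop.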
for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
i += 48;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
o += 48;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc2 = _mm256_add_ps(vacc2, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
vacc1 = _mm256_add_ps(vacc1, vf4);
vacc2 = _mm256_add_ps(vacc2, vf5);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc0 = _mm256_add_ps(vacc0, vacc2);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 8,754 | 40.889952 | 105 | c |
| XNNPACK | XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x48(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
i += 48;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
o += 48;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc0 = _mm256_add_ps(vacc0, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc0 = _mm256_add_ps(vacc0, vf5);
}
__m256 vacc = vacc0;
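  // Remainder loop: process 8 f16 elements at a time with the same rr1-p2 evaluation.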
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 8,595 | 40.931707 | 105 | c |
| XNNPACK | XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x64-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x64_acc2(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) {
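    // 64 halfwords per iteration: eight 8-wide vectors are converted to f32 and evaluated in parallel.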
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56)));
i += 64;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
const __m256 vp7 = _mm256_fmadd_ps(vc2, vt7, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vf7, _MM_FROUND_TO_NEAREST_INT));
o += 64;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc1 = _mm256_add_ps(vacc1, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc1 = _mm256_add_ps(vacc1, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc1 = _mm256_add_ps(vacc1, vf7);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 10,153 | 42.95671 | 105 | c |
| XNNPACK | XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x64-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x64_acc4(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
__m256 vacc3 = _mm256_setzero_ps();
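  // Four accumulators for the eight result vectors per iteration; they are reduced pairwise
  // after the loop (vacc0+vacc1, vacc2+vacc3, then the two partial sums).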
for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56)));
i += 64;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
const __m256 vp7 = _mm256_fmadd_ps(vc2, vt7, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vf7, _MM_FROUND_TO_NEAREST_INT));
o += 64;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc2 = _mm256_add_ps(vacc2, vf2);
vacc3 = _mm256_add_ps(vacc3, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc1 = _mm256_add_ps(vacc1, vf5);
vacc2 = _mm256_add_ps(vacc2, vf6);
vacc3 = _mm256_add_ps(vacc3, vf7);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc2 = _mm256_add_ps(vacc2, vacc3);
vacc0 = _mm256_add_ps(vacc0, vacc2);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 10,307 | 42.86383 | 105 | c |
| XNNPACK | XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x64(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56)));
i += 64;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
const __m256 vp7 = _mm256_fmadd_ps(vc2, vt7, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vf7, _MM_FROUND_TO_NEAREST_INT));
o += 64;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc0 = _mm256_add_ps(vacc0, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc0 = _mm256_add_ps(vacc0, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc0 = _mm256_add_ps(vacc0, vf7);
}
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
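      // Only the two low lanes of vf_lo are valid here: blend mask 0xC keeps the upper two
      // lanes of vacc_lo unchanged so they are not polluted by already-consumed lanes.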
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
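  // The explicit vzeroupper avoids AVX-SSE transition penalties in code that runs after the kernel.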
_mm256_zeroupper();
}
| 10,071 | 42.982533 | 105 | c |
| XNNPACK | XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x72-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x72_acc3(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
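  // Main loop: 72 half-precision elements per iteration. exp(x) is evaluated with the
  // rr1-p2 scheme: n = round(x * log2(e)), s = 2**n, t = x - n * ln(2), and a degree-2
  // polynomial in t; the results are accumulated round-robin into three accumulators.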
for (; batch >= 72 * sizeof(uint16_t); batch -= 72 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56)));
const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 64)));
i += 72;
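    // Subtract the running maximum so every input to exp() is non-positive.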
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
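    // n := round(x * log2(e)), computed with the magic-bias rounding trick.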
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
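    // s := 2**n, built by shifting the integer bits of the biased n into the exponent field.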
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
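    // Reduced argument t := x - n * ln(2).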
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vx8);
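    // Degree-2 polynomial p := c2 * t + c1, so that exp(t) ~= 1 + t * p.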
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
const __m256 vp7 = _mm256_fmadd_ps(vc2, vt7, vc1);
const __m256 vp8 = _mm256_fmadd_ps(vc2, vt8, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
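    // Reconstruct exp(x) ~= s + (t * s) * p = s * (1 + t * p).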
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
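    // Force results to zero where x is below the denormal cutoff.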
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vf7, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 64), _mm256_cvtps_ph(vf8, _MM_FROUND_TO_NEAREST_INT));
o += 72;
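    // Accumulate the nine result vectors round-robin into the three accumulators.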
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc2 = _mm256_add_ps(vacc2, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
vacc1 = _mm256_add_ps(vacc1, vf4);
vacc2 = _mm256_add_ps(vacc2, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc1 = _mm256_add_ps(vacc1, vf7);
vacc2 = _mm256_add_ps(vacc2, vf8);
}
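  // Fold the three accumulators into one before the vector and scalar tails.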
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc0 = _mm256_add_ps(vacc0, vacc2);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 10,968 | 43.771429 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x72(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
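  // Same rr1-p2 computation as the acc3 variant above, but all 72 elements per iteration feed a single accumulator.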
for (; batch >= 72 * sizeof(uint16_t); batch -= 72 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56)));
const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 64)));
i += 72;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vx8);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
const __m256 vp7 = _mm256_fmadd_ps(vc2, vt7, vc1);
const __m256 vp8 = _mm256_fmadd_ps(vc2, vt8, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vf7, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 64), _mm256_cvtps_ph(vf8, _MM_FROUND_TO_NEAREST_INT));
o += 72;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc0 = _mm256_add_ps(vacc0, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc0 = _mm256_add_ps(vacc0, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc0 = _mm256_add_ps(vacc0, vf7);
vacc0 = _mm256_add_ps(vacc0, vf8);
}
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 10,809 | 43.854772 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x80-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x80_acc2(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
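  // 80 elements per iteration; the ten result vectors alternate between two accumulators.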
for (; batch >= 80 * sizeof(uint16_t); batch -= 80 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56)));
const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 64)));
const __m256 vi9 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 72)));
i += 80;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vx9);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
const __m256 vp7 = _mm256_fmadd_ps(vc2, vt7, vc1);
const __m256 vp8 = _mm256_fmadd_ps(vc2, vt8, vc1);
const __m256 vp9 = _mm256_fmadd_ps(vc2, vt9, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vf7, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 64), _mm256_cvtps_ph(vf8, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 72), _mm256_cvtps_ph(vf9, _MM_FROUND_TO_NEAREST_INT));
o += 80;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc1 = _mm256_add_ps(vacc1, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc1 = _mm256_add_ps(vacc1, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc1 = _mm256_add_ps(vacc1, vf7);
vacc0 = _mm256_add_ps(vacc0, vf8);
vacc1 = _mm256_add_ps(vacc1, vf9);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 11,629 | 44.607843 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x80-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x80_acc5(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
__m256 vacc3 = _mm256_setzero_ps();
__m256 vacc4 = _mm256_setzero_ps();
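  // 80 elements per iteration distributed round-robin over five accumulators.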
for (; batch >= 80 * sizeof(uint16_t); batch -= 80 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56)));
const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 64)));
const __m256 vi9 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 72)));
i += 80;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vx9);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
const __m256 vp7 = _mm256_fmadd_ps(vc2, vt7, vc1);
const __m256 vp8 = _mm256_fmadd_ps(vc2, vt8, vc1);
const __m256 vp9 = _mm256_fmadd_ps(vc2, vt9, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vf7, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 64), _mm256_cvtps_ph(vf8, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 72), _mm256_cvtps_ph(vf9, _MM_FROUND_TO_NEAREST_INT));
o += 80;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc2 = _mm256_add_ps(vacc2, vf2);
vacc3 = _mm256_add_ps(vacc3, vf3);
vacc4 = _mm256_add_ps(vacc4, vf4);
vacc0 = _mm256_add_ps(vacc0, vf5);
vacc1 = _mm256_add_ps(vacc1, vf6);
vacc2 = _mm256_add_ps(vacc2, vf7);
vacc3 = _mm256_add_ps(vacc3, vf8);
vacc4 = _mm256_add_ps(vacc4, vf9);
}
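  // Reduce the five accumulators: pairwise sums first, then fold in the remaining one.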
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc2 = _mm256_add_ps(vacc2, vacc3);
vacc0 = _mm256_add_ps(vacc0, vacc2);
vacc0 = _mm256_add_ps(vacc0, vacc4);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 11,860 | 44.444444 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x80(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
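  // 80 elements per iteration accumulated into a single register.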
for (; batch >= 80 * sizeof(uint16_t); batch -= 80 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56)));
const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 64)));
const __m256 vi9 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 72)));
i += 80;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vx9);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
const __m256 vp7 = _mm256_fmadd_ps(vc2, vt7, vc1);
const __m256 vp8 = _mm256_fmadd_ps(vc2, vt8, vc1);
const __m256 vp9 = _mm256_fmadd_ps(vc2, vt9, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vf7, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 64), _mm256_cvtps_ph(vf8, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 72), _mm256_cvtps_ph(vf9, _MM_FROUND_TO_NEAREST_INT));
o += 80;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc0 = _mm256_add_ps(vacc0, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc0 = _mm256_add_ps(vacc0, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc0 = _mm256_add_ps(vacc0, vf7);
vacc0 = _mm256_add_ps(vacc0, vf8);
vacc0 = _mm256_add_ps(vacc0, vf9);
}
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 11,547 | 44.644269 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x96-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x96_acc2(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
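  // 96 elements per iteration with two accumulators; vectors "A" and "B" cover elements 80-95.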
for (; batch >= 96 * sizeof(uint16_t); batch -= 96 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56)));
const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 64)));
const __m256 vi9 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 72)));
const __m256 viA = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 80)));
const __m256 viB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 88)));
i += 96;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
const __m256 vxA = _mm256_sub_ps(viA, vi_max);
const __m256 vxB = _mm256_sub_ps(viB, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
__m256 vnA = _mm256_fmadd_ps(vxA, vlog2e, vmagic_bias);
__m256 vnB = _mm256_fmadd_ps(vxB, vlog2e, vmagic_bias);
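    // Reconstruct s = 2**n by shifting the integer bits produced by the magic-bias addition
    // into the fp32 exponent field.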
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
const __m256 vsA = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vnA), 23));
const __m256 vsB = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vnB), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
vnA = _mm256_sub_ps(vnA, vmagic_bias);
vnB = _mm256_sub_ps(vnB, vmagic_bias);
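    // Compute the reduced argument t = x - n * ln2 (single-constant range reduction, the "rr1" in the file name).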
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vx9);
__m256 vtA = _mm256_fmadd_ps(vnA, vminus_ln2, vxA);
__m256 vtB = _mm256_fmadd_ps(vnB, vminus_ln2, vxB);
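    // Evaluate the degree-2 polynomial p = c2*t + c1 and reconstruct exp(x) ~= s + (t*s)*p.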
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
const __m256 vp7 = _mm256_fmadd_ps(vc2, vt7, vc1);
const __m256 vp8 = _mm256_fmadd_ps(vc2, vt8, vc1);
const __m256 vp9 = _mm256_fmadd_ps(vc2, vt9, vc1);
const __m256 vpA = _mm256_fmadd_ps(vc2, vtA, vc1);
const __m256 vpB = _mm256_fmadd_ps(vc2, vtB, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
vtA = _mm256_mul_ps(vtA, vsA);
vtB = _mm256_mul_ps(vtB, vsB);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
__m256 vfA = _mm256_fmadd_ps(vtA, vpA, vsA);
__m256 vfB = _mm256_fmadd_ps(vtB, vpB, vsB);
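    // Zero the results where x is below the denormal cutoff, i.e. where exp(x - max) underflows fp16.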
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
vfA = _mm256_andnot_ps(_mm256_cmp_ps(vxA, vdenorm_cutoff, _CMP_LT_OS), vfA);
vfB = _mm256_andnot_ps(_mm256_cmp_ps(vxB, vdenorm_cutoff, _CMP_LT_OS), vfB);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vf7, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 64), _mm256_cvtps_ph(vf8, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 72), _mm256_cvtps_ph(vf9, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 80), _mm256_cvtps_ph(vfA, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 88), _mm256_cvtps_ph(vfB, _MM_FROUND_TO_NEAREST_INT));
o += 96;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc1 = _mm256_add_ps(vacc1, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc1 = _mm256_add_ps(vacc1, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc1 = _mm256_add_ps(vacc1, vf7);
vacc0 = _mm256_add_ps(vacc0, vf8);
vacc1 = _mm256_add_ps(vacc1, vf9);
vacc0 = _mm256_add_ps(vacc0, vfA);
vacc1 = _mm256_add_ps(vacc1, vfB);
}
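  // Combine the two partial sums, then process the remaining elements 8 at a time.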
vacc0 = _mm256_add_ps(vacc0, vacc1);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
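  // Tail of 1-7 elements: partial stores, adding only the valid lanes into the running sum.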
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 13,105 | 45.97491 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x96-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x96_acc3(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
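  // Three partial sums: the 12 blocks of each iteration are assigned round-robin to vacc0..vacc2.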
for (; batch >= 96 * sizeof(uint16_t); batch -= 96 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56)));
const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 64)));
const __m256 vi9 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 72)));
const __m256 viA = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 80)));
const __m256 viB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 88)));
i += 96;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
const __m256 vxA = _mm256_sub_ps(viA, vi_max);
const __m256 vxB = _mm256_sub_ps(viB, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
__m256 vnA = _mm256_fmadd_ps(vxA, vlog2e, vmagic_bias);
__m256 vnB = _mm256_fmadd_ps(vxB, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
const __m256 vsA = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vnA), 23));
const __m256 vsB = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vnB), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
vnA = _mm256_sub_ps(vnA, vmagic_bias);
vnB = _mm256_sub_ps(vnB, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vx9);
__m256 vtA = _mm256_fmadd_ps(vnA, vminus_ln2, vxA);
__m256 vtB = _mm256_fmadd_ps(vnB, vminus_ln2, vxB);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
const __m256 vp7 = _mm256_fmadd_ps(vc2, vt7, vc1);
const __m256 vp8 = _mm256_fmadd_ps(vc2, vt8, vc1);
const __m256 vp9 = _mm256_fmadd_ps(vc2, vt9, vc1);
const __m256 vpA = _mm256_fmadd_ps(vc2, vtA, vc1);
const __m256 vpB = _mm256_fmadd_ps(vc2, vtB, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
vtA = _mm256_mul_ps(vtA, vsA);
vtB = _mm256_mul_ps(vtB, vsB);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
__m256 vfA = _mm256_fmadd_ps(vtA, vpA, vsA);
__m256 vfB = _mm256_fmadd_ps(vtB, vpB, vsB);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
vfA = _mm256_andnot_ps(_mm256_cmp_ps(vxA, vdenorm_cutoff, _CMP_LT_OS), vfA);
vfB = _mm256_andnot_ps(_mm256_cmp_ps(vxB, vdenorm_cutoff, _CMP_LT_OS), vfB);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vf7, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 64), _mm256_cvtps_ph(vf8, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 72), _mm256_cvtps_ph(vf9, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 80), _mm256_cvtps_ph(vfA, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 88), _mm256_cvtps_ph(vfB, _MM_FROUND_TO_NEAREST_INT));
o += 96;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc2 = _mm256_add_ps(vacc2, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
vacc1 = _mm256_add_ps(vacc1, vf4);
vacc2 = _mm256_add_ps(vacc2, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc1 = _mm256_add_ps(vacc1, vf7);
vacc2 = _mm256_add_ps(vacc2, vf8);
vacc0 = _mm256_add_ps(vacc0, vf9);
vacc1 = _mm256_add_ps(vacc1, vfA);
vacc2 = _mm256_add_ps(vacc2, vfB);
}
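  // Reduce the three partial sums into a single accumulator.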
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc0 = _mm256_add_ps(vacc0, vacc2);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 13,182 | 45.914591 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x96-acc6.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x96_acc6(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
__m256 vacc3 = _mm256_setzero_ps();
__m256 vacc4 = _mm256_setzero_ps();
__m256 vacc5 = _mm256_setzero_ps();
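  // Six partial sums: the 12 blocks of each iteration are spread round-robin over vacc0..vacc5.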
for (; batch >= 96 * sizeof(uint16_t); batch -= 96 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56)));
const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 64)));
const __m256 vi9 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 72)));
const __m256 viA = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 80)));
const __m256 viB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 88)));
i += 96;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
const __m256 vxA = _mm256_sub_ps(viA, vi_max);
const __m256 vxB = _mm256_sub_ps(viB, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
__m256 vnA = _mm256_fmadd_ps(vxA, vlog2e, vmagic_bias);
__m256 vnB = _mm256_fmadd_ps(vxB, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
const __m256 vsA = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vnA), 23));
const __m256 vsB = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vnB), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
vnA = _mm256_sub_ps(vnA, vmagic_bias);
vnB = _mm256_sub_ps(vnB, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vx9);
__m256 vtA = _mm256_fmadd_ps(vnA, vminus_ln2, vxA);
__m256 vtB = _mm256_fmadd_ps(vnB, vminus_ln2, vxB);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
const __m256 vp7 = _mm256_fmadd_ps(vc2, vt7, vc1);
const __m256 vp8 = _mm256_fmadd_ps(vc2, vt8, vc1);
const __m256 vp9 = _mm256_fmadd_ps(vc2, vt9, vc1);
const __m256 vpA = _mm256_fmadd_ps(vc2, vtA, vc1);
const __m256 vpB = _mm256_fmadd_ps(vc2, vtB, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
vtA = _mm256_mul_ps(vtA, vsA);
vtB = _mm256_mul_ps(vtB, vsB);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
__m256 vfA = _mm256_fmadd_ps(vtA, vpA, vsA);
__m256 vfB = _mm256_fmadd_ps(vtB, vpB, vsB);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
vfA = _mm256_andnot_ps(_mm256_cmp_ps(vxA, vdenorm_cutoff, _CMP_LT_OS), vfA);
vfB = _mm256_andnot_ps(_mm256_cmp_ps(vxB, vdenorm_cutoff, _CMP_LT_OS), vfB);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vf7, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 64), _mm256_cvtps_ph(vf8, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 72), _mm256_cvtps_ph(vf9, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 80), _mm256_cvtps_ph(vfA, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 88), _mm256_cvtps_ph(vfB, _MM_FROUND_TO_NEAREST_INT));
o += 96;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc1 = _mm256_add_ps(vacc1, vf1);
vacc2 = _mm256_add_ps(vacc2, vf2);
vacc3 = _mm256_add_ps(vacc3, vf3);
vacc4 = _mm256_add_ps(vacc4, vf4);
vacc5 = _mm256_add_ps(vacc5, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc1 = _mm256_add_ps(vacc1, vf7);
vacc2 = _mm256_add_ps(vacc2, vf8);
vacc3 = _mm256_add_ps(vacc3, vf9);
vacc4 = _mm256_add_ps(vacc4, vfA);
vacc5 = _mm256_add_ps(vacc5, vfB);
}
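  // Reduce the six partial sums pairwise into a single accumulator.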
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc2 = _mm256_add_ps(vacc2, vacc3);
vacc4 = _mm256_add_ps(vacc4, vacc5);
vacc0 = _mm256_add_ps(vacc0, vacc2);
vacc0 = _mm256_add_ps(vacc0, vacc4);
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 13,413 | 45.738676 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x96(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
__m256 vacc0 = _mm256_setzero_ps();
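  // Single-accumulator variant: all 12 blocks of each iteration are summed into vacc0.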
for (; batch >= 96 * sizeof(uint16_t); batch -= 96 * sizeof(uint16_t)) {
const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32)));
const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40)));
const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48)));
const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56)));
const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 64)));
const __m256 vi9 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 72)));
const __m256 viA = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 80)));
const __m256 viB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 88)));
i += 96;
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
const __m256 vxA = _mm256_sub_ps(viA, vi_max);
const __m256 vxB = _mm256_sub_ps(viB, vi_max);
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
__m256 vnA = _mm256_fmadd_ps(vxA, vlog2e, vmagic_bias);
__m256 vnB = _mm256_fmadd_ps(vxB, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
const __m256 vsA = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vnA), 23));
const __m256 vsB = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vnB), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
vnA = _mm256_sub_ps(vnA, vmagic_bias);
vnB = _mm256_sub_ps(vnB, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vx9);
__m256 vtA = _mm256_fmadd_ps(vnA, vminus_ln2, vxA);
__m256 vtB = _mm256_fmadd_ps(vnB, vminus_ln2, vxB);
const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
const __m256 vp2 = _mm256_fmadd_ps(vc2, vt2, vc1);
const __m256 vp3 = _mm256_fmadd_ps(vc2, vt3, vc1);
const __m256 vp4 = _mm256_fmadd_ps(vc2, vt4, vc1);
const __m256 vp5 = _mm256_fmadd_ps(vc2, vt5, vc1);
const __m256 vp6 = _mm256_fmadd_ps(vc2, vt6, vc1);
const __m256 vp7 = _mm256_fmadd_ps(vc2, vt7, vc1);
const __m256 vp8 = _mm256_fmadd_ps(vc2, vt8, vc1);
const __m256 vp9 = _mm256_fmadd_ps(vc2, vt9, vc1);
const __m256 vpA = _mm256_fmadd_ps(vc2, vtA, vc1);
const __m256 vpB = _mm256_fmadd_ps(vc2, vtB, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
vtA = _mm256_mul_ps(vtA, vsA);
vtB = _mm256_mul_ps(vtB, vsB);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
__m256 vfA = _mm256_fmadd_ps(vtA, vpA, vsA);
__m256 vfB = _mm256_fmadd_ps(vtB, vpB, vsB);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
vfA = _mm256_andnot_ps(_mm256_cmp_ps(vxA, vdenorm_cutoff, _CMP_LT_OS), vfA);
vfB = _mm256_andnot_ps(_mm256_cmp_ps(vxB, vdenorm_cutoff, _CMP_LT_OS), vfB);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vf2, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vf3, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vf4, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vf5, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vf6, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vf7, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 64), _mm256_cvtps_ph(vf8, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 72), _mm256_cvtps_ph(vf9, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 80), _mm256_cvtps_ph(vfA, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 88), _mm256_cvtps_ph(vfB, _MM_FROUND_TO_NEAREST_INT));
o += 96;
vacc0 = _mm256_add_ps(vacc0, vf0);
vacc0 = _mm256_add_ps(vacc0, vf1);
vacc0 = _mm256_add_ps(vacc0, vf2);
vacc0 = _mm256_add_ps(vacc0, vf3);
vacc0 = _mm256_add_ps(vacc0, vf4);
vacc0 = _mm256_add_ps(vacc0, vf5);
vacc0 = _mm256_add_ps(vacc0, vf6);
vacc0 = _mm256_add_ps(vacc0, vf7);
vacc0 = _mm256_add_ps(vacc0, vf8);
vacc0 = _mm256_add_ps(vacc0, vf9);
vacc0 = _mm256_add_ps(vacc0, vfA);
vacc0 = _mm256_add_ps(vacc0, vfB);
}
__m256 vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
o += 8;
vacc = _mm256_add_ps(vacc, vf);
}
__m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx = _mm256_sub_ps(vi, vi_max);
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
__m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
}
}
vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
*((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
_mm256_zeroupper();
}
| 13,023 | 46.018051 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x32-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x32_acc2(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.log2e));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_hi));
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
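  // Main loop: process 32 half-precision elements per iteration entirely in fp16 arithmetic,
  // accumulating the sum in two independent accumulators.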
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
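    // Reconstruct s = 2**n by shifting the integer bits from the magic-bias addition into the
    // fp16 exponent field (fp16 has 10 mantissa bits, hence the shift by 10).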
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
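    // Two-constant range reduction (the "rr2" in the file name): t = x - n * ln2, with ln2 split
    // into hi and lo parts to keep extra precision in half-precision arithmetic.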
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc1 = vaddq_f16(vacc1, vf1);
vacc0 = vaddq_f16(vacc0, vf2);
vacc1 = vaddq_f16(vacc1, vf3);
}
vacc0 = vaddq_f16(vacc0, vacc1);
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
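  // Horizontal reduction: two pairwise adds collapse the 4-lane partial sum; lane 0 holds the final fp16 sum.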
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 7,650 | 40.581522 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x32-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x32_acc4(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.log2e));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_hi));
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc2 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc3 = vreinterpretq_f16_u16(vmovq_n_u16(0));
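  // Four partial sums, one per 8-element block of the 32-element main loop.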
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc1 = vaddq_f16(vacc1, vf1);
vacc2 = vaddq_f16(vacc2, vf2);
vacc3 = vaddq_f16(vacc3, vf3);
}
vacc0 = vaddq_f16(vacc0, vacc1);
vacc2 = vaddq_f16(vacc2, vacc3);
vacc0 = vaddq_f16(vacc0, vacc2);
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
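    // Store and accumulate the 1-7 remaining halfwords in 4/2/1-element steps; the 64-bit left shifts zero
    // out the lanes that were not stored, so only written values enter the accumulator.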
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
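  // Reduce the four lanes of vacc_lo to a single halfword sum and store it to *sum.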
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 7,842 | 40.718085 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x32(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.log2e));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_hi));
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
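  // Same rr2-p2 computation as the acc4 variant: 32 halfwords per main-loop iteration, but all four rows
  // are summed into a single accumulator.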
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc0 = vaddq_f16(vacc0, vf1);
vacc0 = vaddq_f16(vacc0, vf2);
vacc0 = vaddq_f16(vacc0, vf3);
}
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 7,549 | 40.483516 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x40-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x40_acc2(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.log2e));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_hi));
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
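  // 40 halfwords per main-loop iteration; rows alternate between two accumulators to shorten the add
  // dependency chain.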
for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc1 = vaddq_f16(vacc1, vf1);
vacc0 = vaddq_f16(vacc0, vf2);
vacc1 = vaddq_f16(vacc1, vf3);
vacc0 = vaddq_f16(vacc0, vf4);
}
vacc0 = vaddq_f16(vacc0, vacc1);
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 8,431 | 41.585859 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x40-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x40_acc5(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.log2e));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_hi));
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc2 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc3 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc4 = vreinterpretq_f16_u16(vmovq_n_u16(0));
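  // 40 halfwords per main-loop iteration; each of the five rows gets its own accumulator.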
for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc1 = vaddq_f16(vacc1, vf1);
vacc2 = vaddq_f16(vacc2, vf2);
vacc3 = vaddq_f16(vacc3, vf3);
vacc4 = vaddq_f16(vacc4, vf4);
}
vacc0 = vaddq_f16(vacc0, vacc1);
vacc2 = vaddq_f16(vacc2, vacc3);
vacc0 = vaddq_f16(vacc0, vacc2);
vacc0 = vaddq_f16(vacc0, vacc4);
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 8,719 | 41.745098 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x40(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.log2e));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_hi));
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
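  // 40 halfwords per main-loop iteration, all summed into a single accumulator.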
for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc0 = vaddq_f16(vacc0, vf1);
vacc0 = vaddq_f16(vacc0, vf2);
vacc0 = vaddq_f16(vacc0, vf3);
vacc0 = vaddq_f16(vacc0, vf4);
}
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 8,330 | 41.505102 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x48-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x48_acc2(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.log2e));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_hi));
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
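  // 48 halfwords per main-loop iteration; rows alternate between two accumulators.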
for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc1 = vaddq_f16(vacc1, vf1);
vacc0 = vaddq_f16(vacc0, vf2);
vacc1 = vaddq_f16(vacc1, vf3);
vacc0 = vaddq_f16(vacc0, vf4);
vacc1 = vaddq_f16(vacc1, vf5);
}
vacc0 = vaddq_f16(vacc0, vacc1);
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 9,212 | 42.457547 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x48-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x48_acc3(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.log2e));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_hi));
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc2 = vreinterpretq_f16_u16(vmovq_n_u16(0));
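  // 48 halfwords per main-loop iteration; rows are distributed round-robin over three accumulators.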
for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc1 = vaddq_f16(vacc1, vf1);
vacc2 = vaddq_f16(vacc2, vf2);
vacc0 = vaddq_f16(vacc0, vf3);
vacc1 = vaddq_f16(vacc1, vf4);
vacc2 = vaddq_f16(vacc2, vf5);
}
vacc0 = vaddq_f16(vacc0, vacc1);
vacc0 = vaddq_f16(vacc0, vacc2);
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 9,308 | 42.5 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x48(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.log2e));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_hi));
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
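  // 48 halfwords per main-loop iteration, all summed into a single accumulator.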
for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
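    // Reconstruct the exponential: exp(x) = s + (t * s) * p.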
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
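    // Force lanes whose reduced input x is below the denormal cutoff to zero.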
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
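    // Store the exponentials and add them to the running sum.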
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc0 = vaddq_f16(vacc0, vf1);
vacc0 = vaddq_f16(vacc0, vf2);
vacc0 = vaddq_f16(vacc0, vf3);
vacc0 = vaddq_f16(vacc0, vf4);
vacc0 = vaddq_f16(vacc0, vf5);
}
float16x8_t vacc = vacc0;
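  // Remainder loop: one vector of 8 elements at a time, same computation as the main loop.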
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
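  // Final 1-7 elements: compute a full vector (the kernel may over-read), then store and
  // accumulate only the valid lanes; the 64-bit shifts drop lanes that were not written out.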
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
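  // Reduce the 4-lane partial sum to a single scalar and store it to *sum.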
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 9,111 | 42.390476 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x64-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x64_acc2(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.log2e));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_hi));
const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
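  // Main loop: 64 elements (8 vectors) per iteration. exp(x - max) is computed by rounding
  // x*log2(e) with a magic bias, reducing with a split (hi + lo) ln2, and evaluating a
  // degree-2 polynomial; two partial sums shorten the accumulation dependency chain.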
for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
const float16x8_t vx6 = vsubq_f16(vi6, vi_max);
const float16x8_t vx7 = vsubq_f16(vi7, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vx6, vlog2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vx7, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
float16x8_t vt6 = vfmaq_f16(vx6, vn6, vminus_ln2_hi);
float16x8_t vt7 = vfmaq_f16(vx7, vn7, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vminus_ln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
float16x8_t vf6 = vfmaq_f16(vs6, vp6, vt6);
const uint16x8_t vm6 = vcltq_f16(vx6, vdenorm_cutoff);
float16x8_t vf7 = vfmaq_f16(vs7, vp7, vt7);
const uint16x8_t vm7 = vcltq_f16(vx7, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vm6));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vm7));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc1 = vaddq_f16(vacc1, vf1);
vacc0 = vaddq_f16(vacc0, vf2);
vacc1 = vaddq_f16(vacc1, vf3);
vacc0 = vaddq_f16(vacc0, vf4);
vacc1 = vaddq_f16(vacc1, vf5);
vacc0 = vaddq_f16(vacc0, vf6);
vacc1 = vaddq_f16(vacc1, vf7);
}
vacc0 = vaddq_f16(vacc0, vacc1);
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 10,774 | 43.895833 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x64-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x64_acc4(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.log2e));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_hi));
const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc2 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc3 = vreinterpretq_f16_u16(vmovq_n_u16(0));
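  // Main loop: 64 elements per iteration, accumulated into four partial sums that are
  // combined after the loop.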
for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
const float16x8_t vx6 = vsubq_f16(vi6, vi_max);
const float16x8_t vx7 = vsubq_f16(vi7, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vx6, vlog2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vx7, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
float16x8_t vt6 = vfmaq_f16(vx6, vn6, vminus_ln2_hi);
float16x8_t vt7 = vfmaq_f16(vx7, vn7, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vminus_ln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
float16x8_t vf6 = vfmaq_f16(vs6, vp6, vt6);
const uint16x8_t vm6 = vcltq_f16(vx6, vdenorm_cutoff);
float16x8_t vf7 = vfmaq_f16(vs7, vp7, vt7);
const uint16x8_t vm7 = vcltq_f16(vx7, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vm6));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vm7));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc1 = vaddq_f16(vacc1, vf1);
vacc2 = vaddq_f16(vacc2, vf2);
vacc3 = vaddq_f16(vacc3, vf3);
vacc0 = vaddq_f16(vacc0, vf4);
vacc1 = vaddq_f16(vacc1, vf5);
vacc2 = vaddq_f16(vacc2, vf6);
vacc3 = vaddq_f16(vacc3, vf7);
}
vacc0 = vaddq_f16(vacc0, vacc1);
vacc2 = vaddq_f16(vacc2, vacc3);
vacc0 = vaddq_f16(vacc0, vacc2);
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 10,966 | 43.946721 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x64(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.log2e));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_hi));
const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
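  // Main loop: 64 elements per iteration with a single accumulator.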
for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
const float16x8_t vx6 = vsubq_f16(vi6, vi_max);
const float16x8_t vx7 = vsubq_f16(vi7, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vx6, vlog2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vx7, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
float16x8_t vt6 = vfmaq_f16(vx6, vn6, vminus_ln2_hi);
float16x8_t vt7 = vfmaq_f16(vx7, vn7, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vminus_ln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
float16x8_t vf6 = vfmaq_f16(vs6, vp6, vt6);
const uint16x8_t vm6 = vcltq_f16(vx6, vdenorm_cutoff);
float16x8_t vf7 = vfmaq_f16(vs7, vp7, vt7);
const uint16x8_t vm7 = vcltq_f16(vx7, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vm6));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vm7));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc0 = vaddq_f16(vacc0, vf1);
vacc0 = vaddq_f16(vacc0, vf2);
vacc0 = vaddq_f16(vacc0, vf3);
vacc0 = vaddq_f16(vacc0, vf4);
vacc0 = vaddq_f16(vacc0, vf5);
vacc0 = vaddq_f16(vacc0, vf6);
vacc0 = vaddq_f16(vacc0, vf7);
}
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 10,673 | 43.848739 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x72-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x72_acc3(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.log2e));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_hi));
const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc2 = vreinterpretq_f16_u16(vmovq_n_u16(0));
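  // Main loop: 72 elements (9 vectors) per iteration, accumulated into three partial sums.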
for (; batch >= 72 * sizeof(uint16_t); batch -= 72 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
const float16x8_t vx6 = vsubq_f16(vi6, vi_max);
const float16x8_t vx7 = vsubq_f16(vi7, vi_max);
const float16x8_t vx8 = vsubq_f16(vi8, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vx6, vlog2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vx7, vlog2e);
float16x8_t vn8 = vfmaq_f16(vmagic_bias, vx8, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
vn8 = vsubq_f16(vn8, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
float16x8_t vt6 = vfmaq_f16(vx6, vn6, vminus_ln2_hi);
float16x8_t vt7 = vfmaq_f16(vx7, vn7, vminus_ln2_hi);
float16x8_t vt8 = vfmaq_f16(vx8, vn8, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vminus_ln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vminus_ln2_lo);
vt8 = vfmaq_f16(vt8, vn8, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
const float16x8_t vp8 = vfmaq_f16(vc1, vc2, vt8);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
vt8 = vmulq_f16(vt8, vs8);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
float16x8_t vf6 = vfmaq_f16(vs6, vp6, vt6);
const uint16x8_t vm6 = vcltq_f16(vx6, vdenorm_cutoff);
float16x8_t vf7 = vfmaq_f16(vs7, vp7, vt7);
const uint16x8_t vm7 = vcltq_f16(vx7, vdenorm_cutoff);
float16x8_t vf8 = vfmaq_f16(vs8, vp8, vt8);
const uint16x8_t vm8 = vcltq_f16(vx8, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vm6));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vm7));
vf8 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf8), vm8));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf8)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc1 = vaddq_f16(vacc1, vf1);
vacc2 = vaddq_f16(vacc2, vf2);
vacc0 = vaddq_f16(vacc0, vf3);
vacc1 = vaddq_f16(vacc1, vf4);
vacc2 = vaddq_f16(vacc2, vf5);
vacc0 = vaddq_f16(vacc0, vf6);
vacc1 = vaddq_f16(vacc1, vf7);
vacc2 = vaddq_f16(vacc2, vf8);
}
vacc0 = vaddq_f16(vacc0, vacc1);
vacc0 = vaddq_f16(vacc0, vacc2);
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 11,651 | 44.515625 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x72(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.log2e));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_hi));
const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
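  // Main loop: 72 elements per iteration with a single accumulator.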
for (; batch >= 72 * sizeof(uint16_t); batch -= 72 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
const float16x8_t vx6 = vsubq_f16(vi6, vi_max);
const float16x8_t vx7 = vsubq_f16(vi7, vi_max);
const float16x8_t vx8 = vsubq_f16(vi8, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vx6, vlog2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vx7, vlog2e);
float16x8_t vn8 = vfmaq_f16(vmagic_bias, vx8, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
vn8 = vsubq_f16(vn8, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
float16x8_t vt6 = vfmaq_f16(vx6, vn6, vminus_ln2_hi);
float16x8_t vt7 = vfmaq_f16(vx7, vn7, vminus_ln2_hi);
float16x8_t vt8 = vfmaq_f16(vx8, vn8, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vminus_ln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vminus_ln2_lo);
vt8 = vfmaq_f16(vt8, vn8, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
const float16x8_t vp8 = vfmaq_f16(vc1, vc2, vt8);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
vt8 = vmulq_f16(vt8, vs8);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
float16x8_t vf6 = vfmaq_f16(vs6, vp6, vt6);
const uint16x8_t vm6 = vcltq_f16(vx6, vdenorm_cutoff);
float16x8_t vf7 = vfmaq_f16(vs7, vp7, vt7);
const uint16x8_t vm7 = vcltq_f16(vx7, vdenorm_cutoff);
float16x8_t vf8 = vfmaq_f16(vs8, vp8, vt8);
const uint16x8_t vm8 = vcltq_f16(vx8, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vm6));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vm7));
vf8 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf8), vm8));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf8)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc0 = vaddq_f16(vacc0, vf1);
vacc0 = vaddq_f16(vacc0, vf2);
vacc0 = vaddq_f16(vacc0, vf3);
vacc0 = vaddq_f16(vacc0, vf4);
vacc0 = vaddq_f16(vacc0, vf5);
vacc0 = vaddq_f16(vacc0, vf6);
vacc0 = vaddq_f16(vacc0, vf7);
vacc0 = vaddq_f16(vacc0, vf8);
}
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 11,454 | 44.456349 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x80-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x80_acc2(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.log2e));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_hi));
const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
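  // Main loop: 80 elements (10 vectors of 8 half-precision values) per iteration.
  // exp(x - max) is approximated with the RR2-P2 scheme used throughout these kernels:
  //   n = round(x * log2e), obtained with the magic-bias addition;
  //   s = 2**n, built by shifting the rounded bits of n into the fp16 exponent field;
  //   t = x - n * ln2, with ln2 split into hi/lo parts for extra precision;
  //   exp(x) ~= s + (c2 * t + c1) * (t * s), a degree-2 polynomial in t.
  // Lanes whose x falls below the denormal cutoff are flushed to zero; results are
  // stored to the output and summed into two accumulators (the "acc2" variant).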
for (; batch >= 80 * sizeof(uint16_t); batch -= 80 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi9 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
const float16x8_t vx6 = vsubq_f16(vi6, vi_max);
const float16x8_t vx7 = vsubq_f16(vi7, vi_max);
const float16x8_t vx8 = vsubq_f16(vi8, vi_max);
const float16x8_t vx9 = vsubq_f16(vi9, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vx6, vlog2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vx7, vlog2e);
float16x8_t vn8 = vfmaq_f16(vmagic_bias, vx8, vlog2e);
float16x8_t vn9 = vfmaq_f16(vmagic_bias, vx9, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
const float16x8_t vs9 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn9), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
vn8 = vsubq_f16(vn8, vmagic_bias);
vn9 = vsubq_f16(vn9, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
float16x8_t vt6 = vfmaq_f16(vx6, vn6, vminus_ln2_hi);
float16x8_t vt7 = vfmaq_f16(vx7, vn7, vminus_ln2_hi);
float16x8_t vt8 = vfmaq_f16(vx8, vn8, vminus_ln2_hi);
float16x8_t vt9 = vfmaq_f16(vx9, vn9, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vminus_ln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vminus_ln2_lo);
vt8 = vfmaq_f16(vt8, vn8, vminus_ln2_lo);
vt9 = vfmaq_f16(vt9, vn9, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
const float16x8_t vp8 = vfmaq_f16(vc1, vc2, vt8);
const float16x8_t vp9 = vfmaq_f16(vc1, vc2, vt9);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
vt8 = vmulq_f16(vt8, vs8);
vt9 = vmulq_f16(vt9, vs9);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
float16x8_t vf6 = vfmaq_f16(vs6, vp6, vt6);
const uint16x8_t vm6 = vcltq_f16(vx6, vdenorm_cutoff);
float16x8_t vf7 = vfmaq_f16(vs7, vp7, vt7);
const uint16x8_t vm7 = vcltq_f16(vx7, vdenorm_cutoff);
float16x8_t vf8 = vfmaq_f16(vs8, vp8, vt8);
const uint16x8_t vm8 = vcltq_f16(vx8, vdenorm_cutoff);
float16x8_t vf9 = vfmaq_f16(vs9, vp9, vt9);
const uint16x8_t vm9 = vcltq_f16(vx9, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vm6));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vm7));
vf8 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf8), vm8));
vf9 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf9), vm9));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf8)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf9)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc1 = vaddq_f16(vacc1, vf1);
vacc0 = vaddq_f16(vacc0, vf2);
vacc1 = vaddq_f16(vacc1, vf3);
vacc0 = vaddq_f16(vacc0, vf4);
vacc1 = vaddq_f16(vacc1, vf5);
vacc0 = vaddq_f16(vacc0, vf6);
vacc1 = vaddq_f16(vacc1, vf7);
vacc0 = vaddq_f16(vacc0, vf8);
vacc1 = vaddq_f16(vacc1, vf9);
}
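  // Fold the two partial-sum accumulators together before the common remainder path.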
vacc0 = vaddq_f16(vacc0, vacc1);
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 12336 | 45.033582 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x80-acc5.c |
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x80_acc5(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.log2e));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_hi));
const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc2 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc3 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc4 = vreinterpretq_f16_u16(vmovq_n_u16(0));
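  // Same 80-element RR2-P2 loop as the acc2 variant, with the per-vector sums spread
  // round-robin over five accumulators.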
for (; batch >= 80 * sizeof(uint16_t); batch -= 80 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi9 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
const float16x8_t vx6 = vsubq_f16(vi6, vi_max);
const float16x8_t vx7 = vsubq_f16(vi7, vi_max);
const float16x8_t vx8 = vsubq_f16(vi8, vi_max);
const float16x8_t vx9 = vsubq_f16(vi9, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vx6, vlog2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vx7, vlog2e);
float16x8_t vn8 = vfmaq_f16(vmagic_bias, vx8, vlog2e);
float16x8_t vn9 = vfmaq_f16(vmagic_bias, vx9, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
const float16x8_t vs9 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn9), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
vn8 = vsubq_f16(vn8, vmagic_bias);
vn9 = vsubq_f16(vn9, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
float16x8_t vt6 = vfmaq_f16(vx6, vn6, vminus_ln2_hi);
float16x8_t vt7 = vfmaq_f16(vx7, vn7, vminus_ln2_hi);
float16x8_t vt8 = vfmaq_f16(vx8, vn8, vminus_ln2_hi);
float16x8_t vt9 = vfmaq_f16(vx9, vn9, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vminus_ln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vminus_ln2_lo);
vt8 = vfmaq_f16(vt8, vn8, vminus_ln2_lo);
vt9 = vfmaq_f16(vt9, vn9, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
const float16x8_t vp8 = vfmaq_f16(vc1, vc2, vt8);
const float16x8_t vp9 = vfmaq_f16(vc1, vc2, vt9);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
vt8 = vmulq_f16(vt8, vs8);
vt9 = vmulq_f16(vt9, vs9);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
float16x8_t vf6 = vfmaq_f16(vs6, vp6, vt6);
const uint16x8_t vm6 = vcltq_f16(vx6, vdenorm_cutoff);
float16x8_t vf7 = vfmaq_f16(vs7, vp7, vt7);
const uint16x8_t vm7 = vcltq_f16(vx7, vdenorm_cutoff);
float16x8_t vf8 = vfmaq_f16(vs8, vp8, vt8);
const uint16x8_t vm8 = vcltq_f16(vx8, vdenorm_cutoff);
float16x8_t vf9 = vfmaq_f16(vs9, vp9, vt9);
const uint16x8_t vm9 = vcltq_f16(vx9, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vm6));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vm7));
vf8 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf8), vm8));
vf9 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf9), vm9));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf8)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf9)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc1 = vaddq_f16(vacc1, vf1);
vacc2 = vaddq_f16(vacc2, vf2);
vacc3 = vaddq_f16(vacc3, vf3);
vacc4 = vaddq_f16(vacc4, vf4);
vacc0 = vaddq_f16(vacc0, vf5);
vacc1 = vaddq_f16(vacc1, vf6);
vacc2 = vaddq_f16(vacc2, vf7);
vacc3 = vaddq_f16(vacc3, vf8);
vacc4 = vaddq_f16(vacc4, vf9);
}
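  // Reduce the five accumulators to one.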
vacc0 = vaddq_f16(vacc0, vacc1);
vacc2 = vaddq_f16(vacc2, vacc3);
vacc0 = vaddq_f16(vacc0, vacc2);
vacc0 = vaddq_f16(vacc0, vacc4);
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 12624 | 45.076642 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x80.c |
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x80(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.log2e));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_hi));
const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
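  // 80-element RR2-P2 main loop with a single sum accumulator.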
for (; batch >= 80 * sizeof(uint16_t); batch -= 80 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi9 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
const float16x8_t vx6 = vsubq_f16(vi6, vi_max);
const float16x8_t vx7 = vsubq_f16(vi7, vi_max);
const float16x8_t vx8 = vsubq_f16(vi8, vi_max);
const float16x8_t vx9 = vsubq_f16(vi9, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vx6, vlog2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vx7, vlog2e);
float16x8_t vn8 = vfmaq_f16(vmagic_bias, vx8, vlog2e);
float16x8_t vn9 = vfmaq_f16(vmagic_bias, vx9, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
const float16x8_t vs9 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn9), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
vn8 = vsubq_f16(vn8, vmagic_bias);
vn9 = vsubq_f16(vn9, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
float16x8_t vt6 = vfmaq_f16(vx6, vn6, vminus_ln2_hi);
float16x8_t vt7 = vfmaq_f16(vx7, vn7, vminus_ln2_hi);
float16x8_t vt8 = vfmaq_f16(vx8, vn8, vminus_ln2_hi);
float16x8_t vt9 = vfmaq_f16(vx9, vn9, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vminus_ln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vminus_ln2_lo);
vt8 = vfmaq_f16(vt8, vn8, vminus_ln2_lo);
vt9 = vfmaq_f16(vt9, vn9, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
const float16x8_t vp8 = vfmaq_f16(vc1, vc2, vt8);
const float16x8_t vp9 = vfmaq_f16(vc1, vc2, vt9);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
vt8 = vmulq_f16(vt8, vs8);
vt9 = vmulq_f16(vt9, vs9);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
float16x8_t vf6 = vfmaq_f16(vs6, vp6, vt6);
const uint16x8_t vm6 = vcltq_f16(vx6, vdenorm_cutoff);
float16x8_t vf7 = vfmaq_f16(vs7, vp7, vt7);
const uint16x8_t vm7 = vcltq_f16(vx7, vdenorm_cutoff);
float16x8_t vf8 = vfmaq_f16(vs8, vp8, vt8);
const uint16x8_t vm8 = vcltq_f16(vx8, vdenorm_cutoff);
float16x8_t vf9 = vfmaq_f16(vs9, vp9, vt9);
const uint16x8_t vm9 = vcltq_f16(vx9, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vm6));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vm7));
vf8 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf8), vm8));
vf9 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf9), vm9));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf8)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf9)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc0 = vaddq_f16(vacc0, vf1);
vacc0 = vaddq_f16(vacc0, vf2);
vacc0 = vaddq_f16(vacc0, vf3);
vacc0 = vaddq_f16(vacc0, vf4);
vacc0 = vaddq_f16(vacc0, vf5);
vacc0 = vaddq_f16(vacc0, vf6);
vacc0 = vaddq_f16(vacc0, vf7);
vacc0 = vaddq_f16(vacc0, vf8);
vacc0 = vaddq_f16(vacc0, vf9);
}
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 12235 | 45 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x96-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x96_acc2(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.log2e));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_hi));
const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
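  // 96-element main loop (12 vectors of 8 half-precision values); the per-vector
  // sums alternate between two accumulators.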
for (; batch >= 96 * sizeof(uint16_t); batch -= 96 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi9 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t viA = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t viB = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
const float16x8_t vx6 = vsubq_f16(vi6, vi_max);
const float16x8_t vx7 = vsubq_f16(vi7, vi_max);
const float16x8_t vx8 = vsubq_f16(vi8, vi_max);
const float16x8_t vx9 = vsubq_f16(vi9, vi_max);
const float16x8_t vxA = vsubq_f16(viA, vi_max);
const float16x8_t vxB = vsubq_f16(viB, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vx6, vlog2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vx7, vlog2e);
float16x8_t vn8 = vfmaq_f16(vmagic_bias, vx8, vlog2e);
float16x8_t vn9 = vfmaq_f16(vmagic_bias, vx9, vlog2e);
float16x8_t vnA = vfmaq_f16(vmagic_bias, vxA, vlog2e);
float16x8_t vnB = vfmaq_f16(vmagic_bias, vxB, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
const float16x8_t vs9 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn9), 10));
const float16x8_t vsA = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vnA), 10));
const float16x8_t vsB = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vnB), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
vn8 = vsubq_f16(vn8, vmagic_bias);
vn9 = vsubq_f16(vn9, vmagic_bias);
vnA = vsubq_f16(vnA, vmagic_bias);
vnB = vsubq_f16(vnB, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
float16x8_t vt6 = vfmaq_f16(vx6, vn6, vminus_ln2_hi);
float16x8_t vt7 = vfmaq_f16(vx7, vn7, vminus_ln2_hi);
float16x8_t vt8 = vfmaq_f16(vx8, vn8, vminus_ln2_hi);
float16x8_t vt9 = vfmaq_f16(vx9, vn9, vminus_ln2_hi);
float16x8_t vtA = vfmaq_f16(vxA, vnA, vminus_ln2_hi);
float16x8_t vtB = vfmaq_f16(vxB, vnB, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vminus_ln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vminus_ln2_lo);
vt8 = vfmaq_f16(vt8, vn8, vminus_ln2_lo);
vt9 = vfmaq_f16(vt9, vn9, vminus_ln2_lo);
vtA = vfmaq_f16(vtA, vnA, vminus_ln2_lo);
vtB = vfmaq_f16(vtB, vnB, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
const float16x8_t vp8 = vfmaq_f16(vc1, vc2, vt8);
const float16x8_t vp9 = vfmaq_f16(vc1, vc2, vt9);
const float16x8_t vpA = vfmaq_f16(vc1, vc2, vtA);
const float16x8_t vpB = vfmaq_f16(vc1, vc2, vtB);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
vt8 = vmulq_f16(vt8, vs8);
vt9 = vmulq_f16(vt9, vs9);
vtA = vmulq_f16(vtA, vsA);
vtB = vmulq_f16(vtB, vsB);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
float16x8_t vf6 = vfmaq_f16(vs6, vp6, vt6);
const uint16x8_t vm6 = vcltq_f16(vx6, vdenorm_cutoff);
float16x8_t vf7 = vfmaq_f16(vs7, vp7, vt7);
const uint16x8_t vm7 = vcltq_f16(vx7, vdenorm_cutoff);
float16x8_t vf8 = vfmaq_f16(vs8, vp8, vt8);
const uint16x8_t vm8 = vcltq_f16(vx8, vdenorm_cutoff);
float16x8_t vf9 = vfmaq_f16(vs9, vp9, vt9);
const uint16x8_t vm9 = vcltq_f16(vx9, vdenorm_cutoff);
float16x8_t vfA = vfmaq_f16(vsA, vpA, vtA);
const uint16x8_t vmA = vcltq_f16(vxA, vdenorm_cutoff);
float16x8_t vfB = vfmaq_f16(vsB, vpB, vtB);
const uint16x8_t vmB = vcltq_f16(vxB, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vm6));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vm7));
vf8 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf8), vm8));
vf9 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf9), vm9));
vfA = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vfA), vmA));
vfB = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vfB), vmB));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf8)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf9)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vfA)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vfB)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc1 = vaddq_f16(vacc1, vf1);
vacc0 = vaddq_f16(vacc0, vf2);
vacc1 = vaddq_f16(vacc1, vf3);
vacc0 = vaddq_f16(vacc0, vf4);
vacc1 = vaddq_f16(vacc1, vf5);
vacc0 = vaddq_f16(vacc0, vf6);
vacc1 = vaddq_f16(vacc1, vf7);
vacc0 = vaddq_f16(vacc0, vf8);
vacc1 = vaddq_f16(vacc1, vf9);
vacc0 = vaddq_f16(vacc0, vfA);
vacc1 = vaddq_f16(vacc1, vfB);
}
vacc0 = vaddq_f16(vacc0, vacc1);
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
| 13898 | 45.956081 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x96-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x96_acc3(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.log2e));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.magic_bias));
const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_hi));
const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.minus_ln2_lo));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c2));
const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.c1));
const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc2 = vreinterpretq_f16_u16(vmovq_n_u16(0));
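  // 96-element main loop with the per-vector sums distributed round-robin over
  // three accumulators.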
for (; batch >= 96 * sizeof(uint16_t); batch -= 96 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi9 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t viA = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t viB = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
const float16x8_t vx6 = vsubq_f16(vi6, vi_max);
const float16x8_t vx7 = vsubq_f16(vi7, vi_max);
const float16x8_t vx8 = vsubq_f16(vi8, vi_max);
const float16x8_t vx9 = vsubq_f16(vi9, vi_max);
const float16x8_t vxA = vsubq_f16(viA, vi_max);
const float16x8_t vxB = vsubq_f16(viB, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vx6, vlog2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vx7, vlog2e);
float16x8_t vn8 = vfmaq_f16(vmagic_bias, vx8, vlog2e);
float16x8_t vn9 = vfmaq_f16(vmagic_bias, vx9, vlog2e);
float16x8_t vnA = vfmaq_f16(vmagic_bias, vxA, vlog2e);
float16x8_t vnB = vfmaq_f16(vmagic_bias, vxB, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
const float16x8_t vs9 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn9), 10));
const float16x8_t vsA = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vnA), 10));
const float16x8_t vsB = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vnB), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
vn8 = vsubq_f16(vn8, vmagic_bias);
vn9 = vsubq_f16(vn9, vmagic_bias);
vnA = vsubq_f16(vnA, vmagic_bias);
vnB = vsubq_f16(vnB, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
float16x8_t vt6 = vfmaq_f16(vx6, vn6, vminus_ln2_hi);
float16x8_t vt7 = vfmaq_f16(vx7, vn7, vminus_ln2_hi);
float16x8_t vt8 = vfmaq_f16(vx8, vn8, vminus_ln2_hi);
float16x8_t vt9 = vfmaq_f16(vx9, vn9, vminus_ln2_hi);
float16x8_t vtA = vfmaq_f16(vxA, vnA, vminus_ln2_hi);
float16x8_t vtB = vfmaq_f16(vxB, vnB, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vminus_ln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vminus_ln2_lo);
vt8 = vfmaq_f16(vt8, vn8, vminus_ln2_lo);
vt9 = vfmaq_f16(vt9, vn9, vminus_ln2_lo);
vtA = vfmaq_f16(vtA, vnA, vminus_ln2_lo);
vtB = vfmaq_f16(vtB, vnB, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
const float16x8_t vp8 = vfmaq_f16(vc1, vc2, vt8);
const float16x8_t vp9 = vfmaq_f16(vc1, vc2, vt9);
const float16x8_t vpA = vfmaq_f16(vc1, vc2, vtA);
const float16x8_t vpB = vfmaq_f16(vc1, vc2, vtB);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
vt8 = vmulq_f16(vt8, vs8);
vt9 = vmulq_f16(vt9, vs9);
vtA = vmulq_f16(vtA, vsA);
vtB = vmulq_f16(vtB, vsB);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
float16x8_t vf6 = vfmaq_f16(vs6, vp6, vt6);
const uint16x8_t vm6 = vcltq_f16(vx6, vdenorm_cutoff);
float16x8_t vf7 = vfmaq_f16(vs7, vp7, vt7);
const uint16x8_t vm7 = vcltq_f16(vx7, vdenorm_cutoff);
float16x8_t vf8 = vfmaq_f16(vs8, vp8, vt8);
const uint16x8_t vm8 = vcltq_f16(vx8, vdenorm_cutoff);
float16x8_t vf9 = vfmaq_f16(vs9, vp9, vt9);
const uint16x8_t vm9 = vcltq_f16(vx9, vdenorm_cutoff);
float16x8_t vfA = vfmaq_f16(vsA, vpA, vtA);
const uint16x8_t vmA = vcltq_f16(vxA, vdenorm_cutoff);
float16x8_t vfB = vfmaq_f16(vsB, vpB, vtB);
const uint16x8_t vmB = vcltq_f16(vxB, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vm6));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vm7));
vf8 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf8), vm8));
vf9 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf9), vm9));
vfA = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vfA), vmA));
vfB = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vfB), vmB));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf8)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf9)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vfA)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vfB)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc1 = vaddq_f16(vacc1, vf1);
vacc2 = vaddq_f16(vacc2, vf2);
vacc0 = vaddq_f16(vacc0, vf3);
vacc1 = vaddq_f16(vacc1, vf4);
vacc2 = vaddq_f16(vacc2, vf5);
vacc0 = vaddq_f16(vacc0, vf6);
vacc1 = vaddq_f16(vacc1, vf7);
vacc2 = vaddq_f16(vacc2, vf8);
vacc0 = vaddq_f16(vacc0, vf9);
vacc1 = vaddq_f16(vacc1, vfA);
vacc2 = vaddq_f16(vacc2, vfB);
}
vacc0 = vaddq_f16(vacc0, vacc1);
vacc0 = vaddq_f16(vacc0, vacc2);
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
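    // In the 2- and 1-element tails below, the 64-bit left shifts (by 32 and 48 bits) discard the
    // lanes that were not stored to the output, so only the elements actually written contribute
    // to the running sum.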
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
XNNPACK | XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x96-acc6.c
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x96_acc6(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.log2e));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_hi));
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
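  // The body below evaluates exp(i - max) with the rr2_p2 scheme:
  //   1. n = round(x * log2e), computed by adding a magic bias so the rounded integer lands in
  //      the low mantissa bits of the fp16 value;
  //   2. s = 2**n, reconstructed by shifting those bits left by 10 into the fp16 exponent field;
  //   3. t = x - n * ln2, with ln2 split into hi/lo parts for a two-step ("rr2") reduction;
  //   4. exp(x) ~= s + (s * t) * (c1 + c2 * t), a degree-2 ("p2") polynomial in t;
  //   5. lanes with x below denorm_cutoff are flushed to zero via the vbicq_u16 masks.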
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc2 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc3 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc4 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc5 = vreinterpretq_f16_u16(vmovq_n_u16(0));
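  // Six partial sums are carried through the main loop so that consecutive vaddq_f16 operations
  // stay independent; they are folded into a single accumulator after the 96-element blocks.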
for (; batch >= 96 * sizeof(uint16_t); batch -= 96 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi9 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t viA = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t viB = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
const float16x8_t vx6 = vsubq_f16(vi6, vi_max);
const float16x8_t vx7 = vsubq_f16(vi7, vi_max);
const float16x8_t vx8 = vsubq_f16(vi8, vi_max);
const float16x8_t vx9 = vsubq_f16(vi9, vi_max);
const float16x8_t vxA = vsubq_f16(viA, vi_max);
const float16x8_t vxB = vsubq_f16(viB, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vx6, vlog2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vx7, vlog2e);
float16x8_t vn8 = vfmaq_f16(vmagic_bias, vx8, vlog2e);
float16x8_t vn9 = vfmaq_f16(vmagic_bias, vx9, vlog2e);
float16x8_t vnA = vfmaq_f16(vmagic_bias, vxA, vlog2e);
float16x8_t vnB = vfmaq_f16(vmagic_bias, vxB, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
const float16x8_t vs9 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn9), 10));
const float16x8_t vsA = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vnA), 10));
const float16x8_t vsB = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vnB), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
vn8 = vsubq_f16(vn8, vmagic_bias);
vn9 = vsubq_f16(vn9, vmagic_bias);
vnA = vsubq_f16(vnA, vmagic_bias);
vnB = vsubq_f16(vnB, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
float16x8_t vt6 = vfmaq_f16(vx6, vn6, vminus_ln2_hi);
float16x8_t vt7 = vfmaq_f16(vx7, vn7, vminus_ln2_hi);
float16x8_t vt8 = vfmaq_f16(vx8, vn8, vminus_ln2_hi);
float16x8_t vt9 = vfmaq_f16(vx9, vn9, vminus_ln2_hi);
float16x8_t vtA = vfmaq_f16(vxA, vnA, vminus_ln2_hi);
float16x8_t vtB = vfmaq_f16(vxB, vnB, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vminus_ln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vminus_ln2_lo);
vt8 = vfmaq_f16(vt8, vn8, vminus_ln2_lo);
vt9 = vfmaq_f16(vt9, vn9, vminus_ln2_lo);
vtA = vfmaq_f16(vtA, vnA, vminus_ln2_lo);
vtB = vfmaq_f16(vtB, vnB, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
const float16x8_t vp8 = vfmaq_f16(vc1, vc2, vt8);
const float16x8_t vp9 = vfmaq_f16(vc1, vc2, vt9);
const float16x8_t vpA = vfmaq_f16(vc1, vc2, vtA);
const float16x8_t vpB = vfmaq_f16(vc1, vc2, vtB);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
vt8 = vmulq_f16(vt8, vs8);
vt9 = vmulq_f16(vt9, vs9);
vtA = vmulq_f16(vtA, vsA);
vtB = vmulq_f16(vtB, vsB);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
float16x8_t vf6 = vfmaq_f16(vs6, vp6, vt6);
const uint16x8_t vm6 = vcltq_f16(vx6, vdenorm_cutoff);
float16x8_t vf7 = vfmaq_f16(vs7, vp7, vt7);
const uint16x8_t vm7 = vcltq_f16(vx7, vdenorm_cutoff);
float16x8_t vf8 = vfmaq_f16(vs8, vp8, vt8);
const uint16x8_t vm8 = vcltq_f16(vx8, vdenorm_cutoff);
float16x8_t vf9 = vfmaq_f16(vs9, vp9, vt9);
const uint16x8_t vm9 = vcltq_f16(vx9, vdenorm_cutoff);
float16x8_t vfA = vfmaq_f16(vsA, vpA, vtA);
const uint16x8_t vmA = vcltq_f16(vxA, vdenorm_cutoff);
float16x8_t vfB = vfmaq_f16(vsB, vpB, vtB);
const uint16x8_t vmB = vcltq_f16(vxB, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vm6));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vm7));
vf8 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf8), vm8));
vf9 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf9), vm9));
vfA = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vfA), vmA));
vfB = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vfB), vmB));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf8)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf9)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vfA)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vfB)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc1 = vaddq_f16(vacc1, vf1);
vacc2 = vaddq_f16(vacc2, vf2);
vacc3 = vaddq_f16(vacc3, vf3);
vacc4 = vaddq_f16(vacc4, vf4);
vacc5 = vaddq_f16(vacc5, vf5);
vacc0 = vaddq_f16(vacc0, vf6);
vacc1 = vaddq_f16(vacc1, vf7);
vacc2 = vaddq_f16(vacc2, vf8);
vacc3 = vaddq_f16(vacc3, vf9);
vacc4 = vaddq_f16(vacc4, vfA);
vacc5 = vaddq_f16(vacc5, vfB);
}
vacc0 = vaddq_f16(vacc0, vacc1);
vacc2 = vaddq_f16(vacc2, vacc3);
vacc4 = vaddq_f16(vacc4, vacc5);
vacc0 = vaddq_f16(vacc0, vacc2);
vacc0 = vaddq_f16(vacc0, vacc4);
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
XNNPACK | XNNPACK-master/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-x96.c
// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x96(
size_t batch,
const void* input,
const void* max,
void* output,
void* sum,
const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float16x8_t vi_max = vreinterpretq_f16_u16(vld1q_dup_u16(max));
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.log2e));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_hi));
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.minus_ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.c1));
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith_rr2_p2.denorm_cutoff));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
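  // All twelve vectors of each 96-element block are summed into this single accumulator (see the
  // vaddq_f16 chain at the end of the main loop), unlike the -acc3/-acc6 variants above that
  // spread the sum across several registers.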
for (; batch >= 96 * sizeof(uint16_t); batch -= 96 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vi9 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t viA = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t viB = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
const float16x8_t vx6 = vsubq_f16(vi6, vi_max);
const float16x8_t vx7 = vsubq_f16(vi7, vi_max);
const float16x8_t vx8 = vsubq_f16(vi8, vi_max);
const float16x8_t vx9 = vsubq_f16(vi9, vi_max);
const float16x8_t vxA = vsubq_f16(viA, vi_max);
const float16x8_t vxB = vsubq_f16(viB, vi_max);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
float16x8_t vn6 = vfmaq_f16(vmagic_bias, vx6, vlog2e);
float16x8_t vn7 = vfmaq_f16(vmagic_bias, vx7, vlog2e);
float16x8_t vn8 = vfmaq_f16(vmagic_bias, vx8, vlog2e);
float16x8_t vn9 = vfmaq_f16(vmagic_bias, vx9, vlog2e);
float16x8_t vnA = vfmaq_f16(vmagic_bias, vxA, vlog2e);
float16x8_t vnB = vfmaq_f16(vmagic_bias, vxB, vlog2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
const float16x8_t vs9 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn9), 10));
const float16x8_t vsA = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vnA), 10));
const float16x8_t vsB = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vnB), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
vn1 = vsubq_f16(vn1, vmagic_bias);
vn2 = vsubq_f16(vn2, vmagic_bias);
vn3 = vsubq_f16(vn3, vmagic_bias);
vn4 = vsubq_f16(vn4, vmagic_bias);
vn5 = vsubq_f16(vn5, vmagic_bias);
vn6 = vsubq_f16(vn6, vmagic_bias);
vn7 = vsubq_f16(vn7, vmagic_bias);
vn8 = vsubq_f16(vn8, vmagic_bias);
vn9 = vsubq_f16(vn9, vmagic_bias);
vnA = vsubq_f16(vnA, vmagic_bias);
vnB = vsubq_f16(vnB, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
float16x8_t vt6 = vfmaq_f16(vx6, vn6, vminus_ln2_hi);
float16x8_t vt7 = vfmaq_f16(vx7, vn7, vminus_ln2_hi);
float16x8_t vt8 = vfmaq_f16(vx8, vn8, vminus_ln2_hi);
float16x8_t vt9 = vfmaq_f16(vx9, vn9, vminus_ln2_hi);
float16x8_t vtA = vfmaq_f16(vxA, vnA, vminus_ln2_hi);
float16x8_t vtB = vfmaq_f16(vxB, vnB, vminus_ln2_hi);
vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
vt6 = vfmaq_f16(vt6, vn6, vminus_ln2_lo);
vt7 = vfmaq_f16(vt7, vn7, vminus_ln2_lo);
vt8 = vfmaq_f16(vt8, vn8, vminus_ln2_lo);
vt9 = vfmaq_f16(vt9, vn9, vminus_ln2_lo);
vtA = vfmaq_f16(vtA, vnA, vminus_ln2_lo);
vtB = vfmaq_f16(vtB, vnB, vminus_ln2_lo);
const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
const float16x8_t vp8 = vfmaq_f16(vc1, vc2, vt8);
const float16x8_t vp9 = vfmaq_f16(vc1, vc2, vt9);
const float16x8_t vpA = vfmaq_f16(vc1, vc2, vtA);
const float16x8_t vpB = vfmaq_f16(vc1, vc2, vtB);
vt0 = vmulq_f16(vt0, vs0);
vt1 = vmulq_f16(vt1, vs1);
vt2 = vmulq_f16(vt2, vs2);
vt3 = vmulq_f16(vt3, vs3);
vt4 = vmulq_f16(vt4, vs4);
vt5 = vmulq_f16(vt5, vs5);
vt6 = vmulq_f16(vt6, vs6);
vt7 = vmulq_f16(vt7, vs7);
vt8 = vmulq_f16(vt8, vs8);
vt9 = vmulq_f16(vt9, vs9);
vtA = vmulq_f16(vtA, vsA);
vtB = vmulq_f16(vtB, vsB);
float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
float16x8_t vf6 = vfmaq_f16(vs6, vp6, vt6);
const uint16x8_t vm6 = vcltq_f16(vx6, vdenorm_cutoff);
float16x8_t vf7 = vfmaq_f16(vs7, vp7, vt7);
const uint16x8_t vm7 = vcltq_f16(vx7, vdenorm_cutoff);
float16x8_t vf8 = vfmaq_f16(vs8, vp8, vt8);
const uint16x8_t vm8 = vcltq_f16(vx8, vdenorm_cutoff);
float16x8_t vf9 = vfmaq_f16(vs9, vp9, vt9);
const uint16x8_t vm9 = vcltq_f16(vx9, vdenorm_cutoff);
float16x8_t vfA = vfmaq_f16(vsA, vpA, vtA);
const uint16x8_t vmA = vcltq_f16(vxA, vdenorm_cutoff);
float16x8_t vfB = vfmaq_f16(vsB, vpB, vtB);
const uint16x8_t vmB = vcltq_f16(vxB, vdenorm_cutoff);
vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vm6));
vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vm7));
vf8 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf8), vm8));
vf9 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf9), vm9));
vfA = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vfA), vmA));
vfB = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vfB), vmB));
vst1q_u16(o, vreinterpretq_u16_f16(vf0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf3)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf4)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf5)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf6)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf7)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf8)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vf9)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vfA)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vfB)); o += 8;
vacc0 = vaddq_f16(vacc0, vf0);
vacc0 = vaddq_f16(vacc0, vf1);
vacc0 = vaddq_f16(vacc0, vf2);
vacc0 = vaddq_f16(vacc0, vf3);
vacc0 = vaddq_f16(vacc0, vf4);
vacc0 = vaddq_f16(vacc0, vf5);
vacc0 = vaddq_f16(vacc0, vf6);
vacc0 = vaddq_f16(vacc0, vf7);
vacc0 = vaddq_f16(vacc0, vf8);
vacc0 = vaddq_f16(vacc0, vf9);
vacc0 = vaddq_f16(vacc0, vfA);
vacc0 = vaddq_f16(vacc0, vfB);
}
float16x8_t vacc = vacc0;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
vst1q_u16(o, vreinterpretq_u16_f16(vf)); o += 8;
vacc = vaddq_f16(vacc, vf);
}
float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
if (batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vi = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vx = vsubq_f16(vi, vi_max);
float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
vt = vfmaq_f16(vt, vn, vminus_ln2_lo);
const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
vt = vmulq_f16(vt, vs);
float16x8_t vf = vfmaq_f16(vs, vp, vt);
const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));
float16x4_t vf_lo = vget_low_f16(vf);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vf_lo)); o += 4;
vacc_lo = vadd_f16(vacc_lo, vf_lo);
vf_lo = vget_high_f16(vf);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
vf_lo = vext_f16(vf_lo, vf_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vf_lo), 0);
vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
}
}
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
vst1_lane_u16(sum, vreinterpret_u16_f16(vacc_lo), 0);
}
XNNPACK | XNNPACK-master/src/f16-rmax/f16-rmax-f16c.c
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/rmax.h>
void xnn_f16_rmax_ukernel__f16c(
size_t batch,
const void* input,
void* output) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != 0);
assert(output != 0);
const uint16_t* i = (const uint16_t*) input;
__m128i vmax_init = _mm_shufflelo_epi16(_mm_loadl_epi64((const __m128i*) i), _MM_SHUFFLE(0, 0, 0, 0));
vmax_init = _mm_unpacklo_epi64(vmax_init, vmax_init);
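  // Broadcast the first half-precision element (still as raw uint16 bits) to all lanes and use it
  // as the starting maximum, so the initial maximum is a value from the input itself.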
__m256 vmax0 = _mm256_cvtph_ps(vmax_init);
__m256 vmax1 = vmax0;
__m256 vmax2 = vmax0;
__m256 vmax3 = vmax0;
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
i += 32;
vmax0 = _mm256_max_ps(vmax0, vx0);
vmax1 = _mm256_max_ps(vmax1, vx1);
vmax2 = _mm256_max_ps(vmax2, vx2);
vmax3 = _mm256_max_ps(vmax3, vx3);
}
__m256 vmax = _mm256_max_ps(_mm256_max_ps(vmax0, vmax1), _mm256_max_ps(vmax2, vmax3));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vmax = _mm256_max_ps(vmax, vx);
}
__m128 vmax_lo = _mm_max_ps(_mm256_castps256_ps128(vmax), _mm256_extractf128_ps(vmax, 1));
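  // Remainder of 1-7 elements: a full 8-lane vector is still loaded (the kernel is declared
  // XNN_OOB_READS, so reading past the end of the batch is tolerated) and the blend/movehl/max_ss
  // steps below fold in only the valid lanes.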
if XNN_UNLIKELY(batch != 0) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
__m128 vx_lo = _mm256_castps256_ps128(vx);
if (batch & (4 * sizeof(uint16_t))) {
vmax_lo = _mm_max_ps(vmax_lo, vx_lo);
vx_lo = _mm256_extractf128_ps(vx, 1);
}
if (batch & (2 * sizeof(uint16_t))) {
vmax_lo = _mm_blend_ps(_mm_max_ps(vmax_lo, vx_lo), vmax_lo, 0xC);
vx_lo = _mm_movehl_ps(vx_lo, vx_lo);
}
if (batch & (1 * sizeof(uint16_t))) {
vmax_lo = _mm_max_ss(vmax_lo, vx_lo);
}
}
vmax_lo = _mm_max_ps(vmax_lo, _mm_movehl_ps(vmax_lo, vmax_lo));
vmax_lo = _mm_max_ss(vmax_lo, _mm_movehdup_ps(vmax_lo));
*((uint16_t*) output) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vmax_lo, _MM_FROUND_TO_NEAREST_INT), 0);
}
XNNPACK | XNNPACK-master/src/f16-rmax/f16-rmax-neonfp16arith.c
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/rmax.h>
void xnn_f16_rmax_ukernel__neonfp16arith(
size_t batch,
const void* input,
void* output) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != 0);
assert(output != 0);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vmax0 = vreinterpretq_f16_u16(vld1q_dup_u16(i));
float16x8_t vmax1 = vmax0;
float16x8_t vmax2 = vmax0;
float16x8_t vmax3 = vmax0;
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vmax0 = vmaxq_f16(vmax0, vx0);
vmax1 = vmaxq_f16(vmax1, vx1);
vmax2 = vmaxq_f16(vmax2, vx2);
vmax3 = vmaxq_f16(vmax3, vx3);
}
float16x8_t vmax = vmaxq_f16(vmaxq_f16(vmax0, vmax1), vmaxq_f16(vmax2, vmax3));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vmax = vmaxq_f16(vmax, vx);
}
float16x4_t vmax_lo = vmax_f16(vget_low_f16(vmax), vget_high_f16(vmax));
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
float16x4_t vx_lo = vget_low_f16(vx);
if (batch & (4 * sizeof(uint16_t))) {
vmax_lo = vmax_f16(vmax_lo, vx_lo);
vx_lo = vget_high_f16(vx);
}
if (batch & (2 * sizeof(uint16_t))) {
vmax_lo = vmax_f16(vmax_lo, vext_f16(vmax_lo, vx_lo, 2));
vx_lo = vext_f16(vx_lo, vx_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vmax_lo = vmax_f16(vmax_lo, vext_f16(vmax_lo, vx_lo, 1));
}
}
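  // On AArch64 a single vmaxv_f16 performs the horizontal maximum; otherwise two pairwise
  // vpmax_f16 steps reduce the vector and the result is stored as raw fp16 bits.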
#if XNN_ARCH_ARM64 && defined(__GNUC__)
*((__fp16*) o) = vmaxv_f16(vmax_lo);
#else
vmax_lo = vpmax_f16(vmax_lo, vmax_lo);
vmax_lo = vpmax_f16(vmax_lo, vmax_lo);
vst1_lane_u16(o, vreinterpret_u16_f16(vmax_lo), 0);
#endif
}
XNNPACK | XNNPACK-master/src/f16-rsum/gen/f16-rsum-neonfp16arith-x16-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f16-rsum/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f16_rsum_ukernel__neonfp16arith_x16_acc2(
size_t batch,
const void* input,
void* output,
const union xnn_f16_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
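  // Two independent accumulators let the additions for consecutive 8-element blocks overlap; they
  // are merged into vacc0 before the remainder is handled.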
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t vt0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vt1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vaddq_f16(vacc0, vt0);
vacc1 = vaddq_f16(vacc1, vt1);
}
vacc0 = vaddq_f16(vacc0, vacc1);
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vt = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vaddq_f16(vacc0, vt);
}
  const float16x4_t vscale = vreinterpret_f16_u16(vld1_dup_u16(&params->fp16arith.scale));
float16x4_t vacc = vadd_f16(vget_low_f16(vacc0), vget_high_f16(vacc0));
if XNN_UNLIKELY(batch & (4 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u16(vld1_u16(i)); i += 4;
vacc = vadd_f16(vacc, vt);
}
vacc = vpadd_f16(vacc, vacc);
if XNN_UNLIKELY(batch & (2 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u32(vld1_dup_u32((const void*) i)); i += 2;
vacc = vadd_f16(vacc, vt);
}
vacc = vpadd_f16(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u16(vld1_dup_u16(i));
vacc = vadd_f16(vacc, vt);
}
vacc = vmul_f16(vacc, vscale);
vst1_lane_u16(o, vreinterpret_u16_f16(vacc), 0);
}
XNNPACK | XNNPACK-master/src/f16-rsum/gen/f16-rsum-neonfp16arith-x24-acc3.c
// Auto-generated file. Do not edit!
// Template: src/f16-rsum/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f16_rsum_ukernel__neonfp16arith_x24_acc3(
size_t batch,
const void* input,
void* output,
const union xnn_f16_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc2 = vreinterpretq_f16_u16(vmovq_n_u16(0));
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const float16x8_t vt0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vt1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vt2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vaddq_f16(vacc0, vt0);
vacc1 = vaddq_f16(vacc1, vt1);
vacc2 = vaddq_f16(vacc2, vt2);
}
vacc0 = vaddq_f16(vacc0, vacc1);
vacc0 = vaddq_f16(vacc0, vacc2);
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vt = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vaddq_f16(vacc0, vt);
}
  const float16x4_t vscale = vreinterpret_f16_u16(vld1_dup_u16(&params->fp16arith.scale));
float16x4_t vacc = vadd_f16(vget_low_f16(vacc0), vget_high_f16(vacc0));
if XNN_UNLIKELY(batch & (4 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u16(vld1_u16(i)); i += 4;
vacc = vadd_f16(vacc, vt);
}
vacc = vpadd_f16(vacc, vacc);
if XNN_UNLIKELY(batch & (2 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u32(vld1_dup_u32((const void*) i)); i += 2;
vacc = vadd_f16(vacc, vt);
}
vacc = vpadd_f16(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u16(vld1_dup_u16(i));
vacc = vadd_f16(vacc, vt);
}
vacc = vmul_f16(vacc, vscale);
vst1_lane_u16(o, vreinterpret_u16_f16(vacc), 0);
}
XNNPACK | XNNPACK-master/src/f16-rsum/gen/f16-rsum-neonfp16arith-x32-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f16-rsum/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f16_rsum_ukernel__neonfp16arith_x32_acc2(
size_t batch,
const void* input,
void* output,
const union xnn_f16_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const float16x8_t vt0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vt1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vt2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vt3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vaddq_f16(vacc0, vt0);
vacc1 = vaddq_f16(vacc1, vt1);
vacc0 = vaddq_f16(vacc0, vt2);
vacc1 = vaddq_f16(vacc1, vt3);
}
vacc0 = vaddq_f16(vacc0, vacc1);
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vt = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vaddq_f16(vacc0, vt);
}
  const float16x4_t vscale = vreinterpret_f16_u16(vld1_dup_u16(&params->fp16arith.scale));
float16x4_t vacc = vadd_f16(vget_low_f16(vacc0), vget_high_f16(vacc0));
if XNN_UNLIKELY(batch & (4 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u16(vld1_u16(i)); i += 4;
vacc = vadd_f16(vacc, vt);
}
vacc = vpadd_f16(vacc, vacc);
if XNN_UNLIKELY(batch & (2 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u32(vld1_dup_u32((const void*) i)); i += 2;
vacc = vadd_f16(vacc, vt);
}
vacc = vpadd_f16(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u16(vld1_dup_u16(i));
vacc = vadd_f16(vacc, vt);
}
vacc = vmul_f16(vacc, vscale);
vst1_lane_u16(o, vreinterpret_u16_f16(vacc), 0);
}
XNNPACK | XNNPACK-master/src/f16-rsum/gen/f16-rsum-neonfp16arith-x32-acc4.c
// Auto-generated file. Do not edit!
// Template: src/f16-rsum/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f16_rsum_ukernel__neonfp16arith_x32_acc4(
size_t batch,
const void* input,
void* output,
const union xnn_f16_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc2 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc3 = vreinterpretq_f16_u16(vmovq_n_u16(0));
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const float16x8_t vt0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vt1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vt2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vt3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vaddq_f16(vacc0, vt0);
vacc1 = vaddq_f16(vacc1, vt1);
vacc2 = vaddq_f16(vacc2, vt2);
vacc3 = vaddq_f16(vacc3, vt3);
}
vacc0 = vaddq_f16(vacc0, vacc1);
vacc2 = vaddq_f16(vacc2, vacc3);
vacc0 = vaddq_f16(vacc0, vacc2);
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vt = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vaddq_f16(vacc0, vt);
}
  const float16x4_t vscale = vreinterpret_f16_u16(vld1_dup_u16(&params->fp16arith.scale));
float16x4_t vacc = vadd_f16(vget_low_f16(vacc0), vget_high_f16(vacc0));
if XNN_UNLIKELY(batch & (4 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u16(vld1_u16(i)); i += 4;
vacc = vadd_f16(vacc, vt);
}
vacc = vpadd_f16(vacc, vacc);
if XNN_UNLIKELY(batch & (2 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u32(vld1_dup_u32((const void*) i)); i += 2;
vacc = vadd_f16(vacc, vt);
}
vacc = vpadd_f16(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u16(vld1_dup_u16(i));
vacc = vadd_f16(vacc, vt);
}
vacc = vmul_f16(vacc, vscale);
vst1_lane_u16(o, vreinterpret_u16_f16(vacc), 0);
}
XNNPACK | XNNPACK-master/src/f16-rsum/gen/f16-rsum-neonfp16arith-x8.c
// Auto-generated file. Do not edit!
// Template: src/f16-rsum/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f16_rsum_ukernel__neonfp16arith_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float16x8_t vacc0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vt = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc0 = vaddq_f16(vacc0, vt);
}
  const float16x4_t vscale = vreinterpret_f16_u16(vld1_dup_u16(&params->fp16arith.scale));
float16x4_t vacc = vadd_f16(vget_low_f16(vacc0), vget_high_f16(vacc0));
if XNN_UNLIKELY(batch & (4 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u16(vld1_u16(i)); i += 4;
vacc = vadd_f16(vacc, vt);
}
vacc = vpadd_f16(vacc, vacc);
if XNN_UNLIKELY(batch & (2 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u32(vld1_dup_u32((const void*) i)); i += 2;
vacc = vadd_f16(vacc, vt);
}
vacc = vpadd_f16(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(uint16_t))) {
const float16x4_t vt = vreinterpret_f16_u16(vld1_dup_u16(i));
vacc = vadd_f16(vacc, vt);
}
vacc = vmul_f16(vacc, vscale);
vst1_lane_u16(o, vreinterpret_u16_f16(vacc), 0);
}
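// A minimal usage sketch (not part of the original file): summing five fp16 values with a scale of
// 1.0.  The fp16arith.scale field holds the raw IEEE fp16 bit pattern of the scale factor
// (0x3C00 == 1.0), and the initializer below assumes the xnn_f16_scale_params layout implied by
// the params->fp16arith.scale access above.
//
//   uint16_t x[5] = { /* fp16 bit patterns */ };
//   uint16_t y;
//   const union xnn_f16_scale_params p = { .fp16arith = { .scale = UINT16_C(0x3C00) } };
//   xnn_f16_rsum_ukernel__neonfp16arith_x8(5 * sizeof(uint16_t), x, &y, &p);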
XNNPACK | XNNPACK-master/src/f16-spmm/gen/f16-spmm-16x1-minmax-neonfp16arith-pipelined.c
// Auto-generated file. Do not edit!
// Template: src/f16-spmm/neonfp16arith-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f16_spmm_minmax_ukernel_16x1__neonfp16arith_pipelined(
size_t mc,
size_t nc,
const void* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
void* output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(uint16_t) == 0);
assert(nc != 0);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
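  // Sparse layout assumed by the loops below: for each of the nc output channels the weights
  // stream holds one value that seeds the accumulators (effectively the bias) followed by one
  // value per retained non-zero weight; nidx_nnzmap gives the non-zero count per channel, and
  // widx_dmap supplies byte offsets that advance the input pointer from one retained weight's
  // input row to the next.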
#if XNN_ARCH_ARM64
  const uint16x8x2_t vminmax = vld2q_dup_u16(&params->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]);
const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]);
#else
  // vld2_dup is used to work around an AArch32 Clang bug with vld1q_dup
  const uint16x4x2_t vminmax = vld2_dup_u16(&params->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0]));
const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1]));
#endif
size_t output_decrement = output_stride * nc - 16 * sizeof(uint16_t);
while XNN_LIKELY(mc >= 16 * sizeof(uint16_t)) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
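    // Software pipelining: the first weight, offset, and input rows are loaded before the channel
    // loop, and inside the inner loop the data for the next step is fetched before the FMAs that
    // consume the current values, overlapping loads with arithmetic.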
float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
intptr_t diff = *dmap++;
float16x8_t vi01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
float16x8_t vi89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vw;
float16x8_t vacc89ABCDEF = vw;
vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc01234567 = vfmaq_f16(vacc01234567, vi01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, vi89ABCDEF, vw);
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
xnn_prefetch_to_l1(i + 32);
diff = *dmap++;
vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vi01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
vi89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 16;
mc -= 16 * sizeof(uint16_t);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(uint16_t);
if (mc & (8 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 8;
}
output_decrement += 4 * sizeof(uint16_t);
if (mc & (4 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0123 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0123 = vreinterpret_f16_u16(vld1_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0123 = vfma_f16(vacc0123, va0123, vw);
} while (--nnz != 0);
}
float16x4_t vout0123 = vmin_f16(vacc0123, vget_low_f16(vmax));
vout0123 = vmax_f16(vout0123, vget_low_f16(vmin));
vst1_u16(o, vreinterpret_u16_f16(vout0123));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 4;
}
output_decrement += 2 * sizeof(uint16_t);
if (mc & (2 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc01 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va01 = vreinterpret_f16_u32(vld1_dup_u32((const void*) i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc01 = vfma_f16(vacc01, va01, vw);
} while (--nnz != 0);
}
float16x4_t vout01 = vmin_f16(vacc01, vget_low_f16(vmax));
vout01 = vmax_f16(vout01, vget_low_f16(vmin));
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vout01), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 2;
}
output_decrement += 1 * sizeof(uint16_t);
if (mc & (1 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0 = vreinterpret_f16_u16(vld1_dup_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0 = vfma_f16(vacc0, va0, vw);
} while (--nnz != 0);
}
float16x4_t vout0 = vmin_f16(vacc0, vget_low_f16(vmax));
vout0 = vmax_f16(vout0, vget_low_f16(vmin));
vst1_lane_u16(o, vreinterpret_u16_f16(vout0), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 1;
}
}
}
XNNPACK | XNNPACK-master/src/f16-spmm/gen/f16-spmm-16x1-minmax-neonfp16arith-x2.c
// Auto-generated file. Do not edit!
// Template: src/f16-spmm/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f16_spmm_minmax_ukernel_16x1__neonfp16arith_x2(
size_t mc,
size_t nc,
const void* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
void* output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(uint16_t) == 0);
assert(nc != 0);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
#if XNN_ARCH_ARM64
  const uint16x8x2_t vminmax = vld2q_dup_u16(&params->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]);
const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]);
#else
  // vld2_dup is used to work around an AArch32 Clang bug with vld1q_dup
  const uint16x4x2_t vminmax = vld2_dup_u16(&params->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0]));
const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1]));
#endif
size_t output_decrement = output_stride * nc - 16 * sizeof(uint16_t);
while XNN_LIKELY(mc >= 16 * sizeof(uint16_t)) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567x0 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
float16x8_t vacc01234567x1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc89ABCDEFx0 = vacc01234567x0;
float16x8_t vacc89ABCDEFx1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
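      // The non-zero loop is unrolled by two with independent x0/x1 accumulator pairs so the two
      // FMA chains can proceed in parallel; the pairs are summed after the unrolled loop.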
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const float16x8_t va01234567x0 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEFx0 = vreinterpretq_f16_u16(vld1q_u16(i + 8));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff0);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw0 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567x0 = vfmaq_f16(vacc01234567x0, va01234567x0, vw0);
vacc89ABCDEFx0 = vfmaq_f16(vacc89ABCDEFx0, va89ABCDEFx0, vw0);
const float16x8_t va01234567x1 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEFx1 = vreinterpretq_f16_u16(vld1q_u16(i + 8));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff1);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw1 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567x1 = vfmaq_f16(vacc01234567x1, va01234567x1, vw1);
vacc89ABCDEFx1 = vfmaq_f16(vacc89ABCDEFx1, va89ABCDEFx1, vw1);
}
float16x8_t vacc01234567 = vacc01234567x0;
float16x8_t vacc89ABCDEF = vacc89ABCDEFx0;
vacc01234567 = vaddq_f16(vacc01234567, vacc01234567x1);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vacc89ABCDEFx1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 16;
mc -= 16 * sizeof(uint16_t);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(uint16_t);
if (mc & (8 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 8;
}
output_decrement += 4 * sizeof(uint16_t);
if (mc & (4 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0123 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0123 = vreinterpret_f16_u16(vld1_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0123 = vfma_f16(vacc0123, va0123, vw);
} while (--nnz != 0);
}
float16x4_t vout0123 = vmin_f16(vacc0123, vget_low_f16(vmax));
vout0123 = vmax_f16(vout0123, vget_low_f16(vmin));
vst1_u16(o, vreinterpret_u16_f16(vout0123));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 4;
}
output_decrement += 2 * sizeof(uint16_t);
if (mc & (2 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc01 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va01 = vreinterpret_f16_u32(vld1_dup_u32((const void*) i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc01 = vfma_f16(vacc01, va01, vw);
} while (--nnz != 0);
}
float16x4_t vout01 = vmin_f16(vacc01, vget_low_f16(vmax));
vout01 = vmax_f16(vout01, vget_low_f16(vmin));
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vout01), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 2;
}
output_decrement += 1 * sizeof(uint16_t);
if (mc & (1 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0 = vreinterpret_f16_u16(vld1_dup_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0 = vfma_f16(vacc0, va0, vw);
} while (--nnz != 0);
}
float16x4_t vout0 = vmin_f16(vacc0, vget_low_f16(vmax));
vout0 = vmax_f16(vout0, vget_low_f16(vmin));
vst1_lane_u16(o, vreinterpret_u16_f16(vout0), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 1;
}
}
}
| 9,174 | 41.476852 | 97 | c |
XNNPACK | XNNPACK-master/src/f16-spmm/gen/f16-spmm-16x1-minmax-neonfp16arith.c |
// Auto-generated file. Do not edit!
// Template: src/f16-spmm/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
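// Kernel contract, as exercised by the code below:
//   mc          - size of the dense M dimension, in bytes (a multiple of sizeof(uint16_t))
//   nc          - number of output channels
//   weights     - per output channel: one accumulator-initializing (bias) value,
//                 then that channel's nonzero weight values
//   widx_dmap   - signed byte deltas that advance the input pointer between nonzero weights
//   nidx_nnzmap - number of nonzero weights per output channel
// Half-precision values are carried as uint16_t and reinterpreted to float16 vectors.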
void xnn_f16_spmm_minmax_ukernel_16x1__neonfp16arith(
size_t mc,
size_t nc,
const void* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
void* output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(uint16_t) == 0);
assert(nc != 0);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
#if XNN_ARCH_ARM64
const uint16x8x2_t vminmax = vld2q_dup_u16(¶ms->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]);
const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]);
#else
    // vld2_dup is used to work around an aarch32 clang bug with vld1q_dup
const uint16x4x2_t vminmax = vld2_dup_u16(¶ms->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0]));
const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1]));
#endif
size_t output_decrement = output_stride * nc - 16 * sizeof(uint16_t);
while XNN_LIKELY(mc >= 16 * sizeof(uint16_t)) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
float16x8_t vacc89ABCDEF = vacc01234567;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 16;
mc -= 16 * sizeof(uint16_t);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(uint16_t);
if (mc & (8 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 8;
}
output_decrement += 4 * sizeof(uint16_t);
if (mc & (4 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0123 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0123 = vreinterpret_f16_u16(vld1_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0123 = vfma_f16(vacc0123, va0123, vw);
} while (--nnz != 0);
}
float16x4_t vout0123 = vmin_f16(vacc0123, vget_low_f16(vmax));
vout0123 = vmax_f16(vout0123, vget_low_f16(vmin));
vst1_u16(o, vreinterpret_u16_f16(vout0123));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 4;
}
output_decrement += 2 * sizeof(uint16_t);
if (mc & (2 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc01 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va01 = vreinterpret_f16_u32(vld1_dup_u32((const void*) i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc01 = vfma_f16(vacc01, va01, vw);
} while (--nnz != 0);
}
float16x4_t vout01 = vmin_f16(vacc01, vget_low_f16(vmax));
vout01 = vmax_f16(vout01, vget_low_f16(vmin));
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vout01), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 2;
}
output_decrement += 1 * sizeof(uint16_t);
if (mc & (1 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0 = vreinterpret_f16_u16(vld1_dup_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0 = vfma_f16(vacc0, va0, vw);
} while (--nnz != 0);
}
float16x4_t vout0 = vmin_f16(vacc0, vget_low_f16(vmax));
vout0 = vmax_f16(vout0, vget_low_f16(vmin));
vst1_lane_u16(o, vreinterpret_u16_f16(vout0), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 1;
}
}
}
| 7,609 | 39.26455 | 97 | c |
XNNPACK | XNNPACK-master/src/f16-spmm/gen/f16-spmm-24x1-minmax-neonfp16arith-pipelined.c |
// Auto-generated file. Do not edit!
// Template: src/f16-spmm/neonfp16arith-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
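// This pipelined variant preloads the next weight (vw), index delta (diff), and 24 input values
// before each FMA step, so the loads for iteration k+1 overlap the multiply-accumulates of
// iteration k.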
void xnn_f16_spmm_minmax_ukernel_24x1__neonfp16arith_pipelined(
size_t mc,
size_t nc,
const void* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
void* output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(uint16_t) == 0);
assert(nc != 0);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
#if XNN_ARCH_ARM64
const uint16x8x2_t vminmax = vld2q_dup_u16(¶ms->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]);
const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]);
#else
    // vld2_dup is used to work around an aarch32 clang bug with vld1q_dup
const uint16x4x2_t vminmax = vld2_dup_u16(¶ms->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0]));
const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1]));
#endif
size_t output_decrement = output_stride * nc - 24 * sizeof(uint16_t);
while XNN_LIKELY(mc >= 24 * sizeof(uint16_t)) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
intptr_t diff = *dmap++;
float16x8_t vi01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
float16x8_t vi89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
float16x8_t viGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i + 16));
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vw;
float16x8_t vacc89ABCDEF = vw;
float16x8_t vaccGHIJKLMN = vw;
vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc01234567 = vfmaq_f16(vacc01234567, vi01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, vi89ABCDEF, vw);
vaccGHIJKLMN = vfmaq_f16(vaccGHIJKLMN, viGHIJKLMN, vw);
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
xnn_prefetch_to_l1(i + 32);
diff = *dmap++;
vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vi01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
vi89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
viGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i + 16));
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
float16x8_t voutGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
voutGHIJKLMN = vmaxq_f16(voutGHIJKLMN, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
vst1q_u16(o + 16, vreinterpretq_u16_f16(voutGHIJKLMN));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 24;
mc -= 24 * sizeof(uint16_t);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(uint16_t);
if (mc & (16 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
float16x8_t vacc89ABCDEF = vacc01234567;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 16;
}
output_decrement += 8 * sizeof(uint16_t);
if (mc & (8 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 8;
}
output_decrement += 4 * sizeof(uint16_t);
if (mc & (4 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0123 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0123 = vreinterpret_f16_u16(vld1_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0123 = vfma_f16(vacc0123, va0123, vw);
} while (--nnz != 0);
}
float16x4_t vout0123 = vmin_f16(vacc0123, vget_low_f16(vmax));
vout0123 = vmax_f16(vout0123, vget_low_f16(vmin));
vst1_u16(o, vreinterpret_u16_f16(vout0123));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 4;
}
output_decrement += 2 * sizeof(uint16_t);
if (mc & (2 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc01 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va01 = vreinterpret_f16_u32(vld1_dup_u32((const void*) i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc01 = vfma_f16(vacc01, va01, vw);
} while (--nnz != 0);
}
float16x4_t vout01 = vmin_f16(vacc01, vget_low_f16(vmax));
vout01 = vmax_f16(vout01, vget_low_f16(vmin));
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vout01), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 2;
}
output_decrement += 1 * sizeof(uint16_t);
if (mc & (1 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0 = vreinterpret_f16_u16(vld1_dup_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0 = vfma_f16(vacc0, va0, vw);
} while (--nnz != 0);
}
float16x4_t vout0 = vmin_f16(vacc0, vget_low_f16(vmax));
vout0 = vmax_f16(vout0, vget_low_f16(vmin));
vst1_lane_u16(o, vreinterpret_u16_f16(vout0), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 1;
}
}
}
| 9,750 | 40.849785 | 97 | c |
XNNPACK | XNNPACK-master/src/f16-spmm/gen/f16-spmm-24x1-minmax-neonfp16arith-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-spmm/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
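// The _x2 variant unrolls the nonzero loop by two: a second accumulator set (the ...x1 vectors,
// started at zero) takes every other nonzero weight to break the FMA dependency chain, and the
// two sets are added together before clamping. A single-step loop handles an odd trailing nonzero.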
void xnn_f16_spmm_minmax_ukernel_24x1__neonfp16arith_x2(
size_t mc,
size_t nc,
const void* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
void* output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(uint16_t) == 0);
assert(nc != 0);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
#if XNN_ARCH_ARM64
const uint16x8x2_t vminmax = vld2q_dup_u16(¶ms->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]);
const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]);
#else
    // vld2_dup is used to work around an aarch32 clang bug with vld1q_dup
const uint16x4x2_t vminmax = vld2_dup_u16(¶ms->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0]));
const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1]));
#endif
size_t output_decrement = output_stride * nc - 24 * sizeof(uint16_t);
while XNN_LIKELY(mc >= 24 * sizeof(uint16_t)) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567x0 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
float16x8_t vacc01234567x1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc89ABCDEFx0 = vacc01234567x0;
float16x8_t vacc89ABCDEFx1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vaccGHIJKLMNx0 = vacc01234567x0;
float16x8_t vaccGHIJKLMNx1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const float16x8_t va01234567x0 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEFx0 = vreinterpretq_f16_u16(vld1q_u16(i + 8));
const float16x8_t vaGHIJKLMNx0 = vreinterpretq_f16_u16(vld1q_u16(i + 16));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff0);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw0 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567x0 = vfmaq_f16(vacc01234567x0, va01234567x0, vw0);
vacc89ABCDEFx0 = vfmaq_f16(vacc89ABCDEFx0, va89ABCDEFx0, vw0);
vaccGHIJKLMNx0 = vfmaq_f16(vaccGHIJKLMNx0, vaGHIJKLMNx0, vw0);
const float16x8_t va01234567x1 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEFx1 = vreinterpretq_f16_u16(vld1q_u16(i + 8));
const float16x8_t vaGHIJKLMNx1 = vreinterpretq_f16_u16(vld1q_u16(i + 16));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff1);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw1 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567x1 = vfmaq_f16(vacc01234567x1, va01234567x1, vw1);
vacc89ABCDEFx1 = vfmaq_f16(vacc89ABCDEFx1, va89ABCDEFx1, vw1);
vaccGHIJKLMNx1 = vfmaq_f16(vaccGHIJKLMNx1, vaGHIJKLMNx1, vw1);
}
float16x8_t vacc01234567 = vacc01234567x0;
float16x8_t vacc89ABCDEF = vacc89ABCDEFx0;
float16x8_t vaccGHIJKLMN = vaccGHIJKLMNx0;
vacc01234567 = vaddq_f16(vacc01234567, vacc01234567x1);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vacc89ABCDEFx1);
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vaccGHIJKLMNx1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
const float16x8_t vaGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i + 16));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vw);
vaccGHIJKLMN = vfmaq_f16(vaccGHIJKLMN, vaGHIJKLMN, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
float16x8_t voutGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
voutGHIJKLMN = vmaxq_f16(voutGHIJKLMN, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
vst1q_u16(o + 16, vreinterpretq_u16_f16(voutGHIJKLMN));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 24;
mc -= 24 * sizeof(uint16_t);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(uint16_t);
if (mc & (16 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
float16x8_t vacc89ABCDEF = vacc01234567;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 16;
}
output_decrement += 8 * sizeof(uint16_t);
if (mc & (8 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 8;
}
output_decrement += 4 * sizeof(uint16_t);
if (mc & (4 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0123 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0123 = vreinterpret_f16_u16(vld1_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0123 = vfma_f16(vacc0123, va0123, vw);
} while (--nnz != 0);
}
float16x4_t vout0123 = vmin_f16(vacc0123, vget_low_f16(vmax));
vout0123 = vmax_f16(vout0123, vget_low_f16(vmin));
vst1_u16(o, vreinterpret_u16_f16(vout0123));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 4;
}
output_decrement += 2 * sizeof(uint16_t);
if (mc & (2 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc01 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va01 = vreinterpret_f16_u32(vld1_dup_u32((const void*) i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc01 = vfma_f16(vacc01, va01, vw);
} while (--nnz != 0);
}
float16x4_t vout01 = vmin_f16(vacc01, vget_low_f16(vmax));
vout01 = vmax_f16(vout01, vget_low_f16(vmin));
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vout01), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 2;
}
output_decrement += 1 * sizeof(uint16_t);
if (mc & (1 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0 = vreinterpret_f16_u16(vld1_dup_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0 = vfma_f16(vacc0, va0, vw);
} while (--nnz != 0);
}
float16x4_t vout0 = vmin_f16(vacc0, vget_low_f16(vmax));
vout0 = vmax_f16(vout0, vget_low_f16(vmin));
vst1_lane_u16(o, vreinterpret_u16_f16(vout0), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 1;
}
}
}
| 11,578 | 43.363985 | 97 | c |
XNNPACK | XNNPACK-master/src/f16-spmm/gen/f16-spmm-24x1-minmax-neonfp16arith.c |
// Auto-generated file. Do not edit!
// Template: src/f16-spmm/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
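// Rows left over after the 24-wide main loop are processed in progressively narrower tiles
// (16, 8, 4, 2, then 1 halfwords); output_decrement is widened before each tile so the output
// pointer rewinds to the start of the next tile after every sweep over the nc channels.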
void xnn_f16_spmm_minmax_ukernel_24x1__neonfp16arith(
size_t mc,
size_t nc,
const void* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
void* output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(uint16_t) == 0);
assert(nc != 0);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
#if XNN_ARCH_ARM64
const uint16x8x2_t vminmax = vld2q_dup_u16(¶ms->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]);
const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]);
#else
    // vld2_dup is used to work around an aarch32 clang bug with vld1q_dup
const uint16x4x2_t vminmax = vld2_dup_u16(¶ms->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0]));
const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1]));
#endif
size_t output_decrement = output_stride * nc - 24 * sizeof(uint16_t);
while XNN_LIKELY(mc >= 24 * sizeof(uint16_t)) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
float16x8_t vacc89ABCDEF = vacc01234567;
float16x8_t vaccGHIJKLMN = vacc01234567;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
const float16x8_t vaGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i + 16));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vw);
vaccGHIJKLMN = vfmaq_f16(vaccGHIJKLMN, vaGHIJKLMN, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
float16x8_t voutGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
voutGHIJKLMN = vmaxq_f16(voutGHIJKLMN, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
vst1q_u16(o + 16, vreinterpretq_u16_f16(voutGHIJKLMN));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 24;
mc -= 24 * sizeof(uint16_t);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 8 * sizeof(uint16_t);
if (mc & (16 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
float16x8_t vacc89ABCDEF = vacc01234567;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 16;
}
output_decrement += 8 * sizeof(uint16_t);
if (mc & (8 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 8;
}
output_decrement += 4 * sizeof(uint16_t);
if (mc & (4 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0123 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0123 = vreinterpret_f16_u16(vld1_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0123 = vfma_f16(vacc0123, va0123, vw);
} while (--nnz != 0);
}
float16x4_t vout0123 = vmin_f16(vacc0123, vget_low_f16(vmax));
vout0123 = vmax_f16(vout0123, vget_low_f16(vmin));
vst1_u16(o, vreinterpret_u16_f16(vout0123));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 4;
}
output_decrement += 2 * sizeof(uint16_t);
if (mc & (2 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc01 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va01 = vreinterpret_f16_u32(vld1_dup_u32((const void*) i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc01 = vfma_f16(vacc01, va01, vw);
} while (--nnz != 0);
}
float16x4_t vout01 = vmin_f16(vacc01, vget_low_f16(vmax));
vout01 = vmax_f16(vout01, vget_low_f16(vmin));
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vout01), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 2;
}
output_decrement += 1 * sizeof(uint16_t);
if (mc & (1 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0 = vreinterpret_f16_u16(vld1_dup_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0 = vfma_f16(vacc0, va0, vw);
} while (--nnz != 0);
}
float16x4_t vout0 = vmin_f16(vacc0, vget_low_f16(vmax));
vout0 = vmax_f16(vout0, vget_low_f16(vmin));
vst1_lane_u16(o, vreinterpret_u16_f16(vout0), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 1;
}
}
}
| 9,516 | 40.92511 | 97 | c |
XNNPACK | XNNPACK-master/src/f16-spmm/gen/f16-spmm-32x1-minmax-neonfp16arith-pipelined.c |
// Auto-generated file. Do not edit!
// Template: src/f16-spmm/neonfp16arith-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f16_spmm_minmax_ukernel_32x1__neonfp16arith_pipelined(
size_t mc,
size_t nc,
const void* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
void* output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(uint16_t) == 0);
assert(nc != 0);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
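  // vld2q_dup_u16 reads the adjacent half-precision min and max fields in a single two-element
  // load and broadcasts each across a vector; the values are only reinterpreted as float16 for
  // the clamping at the end of each output channel.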
#if XNN_ARCH_ARM64
const uint16x8x2_t vminmax = vld2q_dup_u16(¶ms->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]);
const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]);
#else
    // vld2_dup is used to work around an aarch32 clang bug with vld1q_dup
const uint16x4x2_t vminmax = vld2_dup_u16(¶ms->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0]));
const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1]));
#endif
size_t output_decrement = output_stride * nc - 32 * sizeof(uint16_t);
while XNN_LIKELY(mc >= 32 * sizeof(uint16_t)) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
intptr_t diff = *dmap++;
float16x8_t vi01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
float16x8_t vi89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
float16x8_t viGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i + 16));
float16x8_t viOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i + 24));
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vw;
float16x8_t vacc89ABCDEF = vw;
float16x8_t vaccGHIJKLMN = vw;
float16x8_t vaccOPQRSTUV = vw;
vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc01234567 = vfmaq_f16(vacc01234567, vi01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, vi89ABCDEF, vw);
vaccGHIJKLMN = vfmaq_f16(vaccGHIJKLMN, viGHIJKLMN, vw);
vaccOPQRSTUV = vfmaq_f16(vaccOPQRSTUV, viOPQRSTUV, vw);
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
xnn_prefetch_to_l1(i + 32);
diff = *dmap++;
vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vi01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
vi89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
viGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i + 16));
viOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i + 24));
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
float16x8_t voutGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax);
float16x8_t voutOPQRSTUV = vminq_f16(vaccOPQRSTUV, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
voutGHIJKLMN = vmaxq_f16(voutGHIJKLMN, vmin);
voutOPQRSTUV = vmaxq_f16(voutOPQRSTUV, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
vst1q_u16(o + 16, vreinterpretq_u16_f16(voutGHIJKLMN));
vst1q_u16(o + 24, vreinterpretq_u16_f16(voutOPQRSTUV));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 32;
mc -= 32 * sizeof(uint16_t);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(uint16_t);
if (mc & (16 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
float16x8_t vacc89ABCDEF = vacc01234567;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 16;
}
output_decrement += 8 * sizeof(uint16_t);
if (mc & (8 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 8;
}
output_decrement += 4 * sizeof(uint16_t);
if (mc & (4 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0123 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0123 = vreinterpret_f16_u16(vld1_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0123 = vfma_f16(vacc0123, va0123, vw);
} while (--nnz != 0);
}
float16x4_t vout0123 = vmin_f16(vacc0123, vget_low_f16(vmax));
vout0123 = vmax_f16(vout0123, vget_low_f16(vmin));
vst1_u16(o, vreinterpret_u16_f16(vout0123));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 4;
}
output_decrement += 2 * sizeof(uint16_t);
if (mc & (2 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc01 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va01 = vreinterpret_f16_u32(vld1_dup_u32((const void*) i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc01 = vfma_f16(vacc01, va01, vw);
} while (--nnz != 0);
}
float16x4_t vout01 = vmin_f16(vacc01, vget_low_f16(vmax));
vout01 = vmax_f16(vout01, vget_low_f16(vmin));
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vout01), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 2;
}
output_decrement += 1 * sizeof(uint16_t);
if (mc & (1 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0 = vreinterpret_f16_u16(vld1_dup_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0 = vfma_f16(vacc0, va0, vw);
} while (--nnz != 0);
}
float16x4_t vout0 = vmin_f16(vacc0, vget_low_f16(vmax));
vout0 = vmax_f16(vout0, vget_low_f16(vmin));
vst1_lane_u16(o, vreinterpret_u16_f16(vout0), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 1;
}
}
}
| 10,168 | 41.370833 | 97 | c |
XNNPACK | XNNPACK-master/src/f16-spmm/gen/f16-spmm-32x1-minmax-neonfp16arith-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-spmm/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
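// The xnn_prefetch_to_l1 calls in the loops below fetch upcoming input rows (i + 32) and
// weights (w + 64) into L1 ahead of use, hiding memory latency behind the FMA work.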
void xnn_f16_spmm_minmax_ukernel_32x1__neonfp16arith_x2(
size_t mc,
size_t nc,
const void* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
void* output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(uint16_t) == 0);
assert(nc != 0);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
#if XNN_ARCH_ARM64
const uint16x8x2_t vminmax = vld2q_dup_u16(¶ms->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]);
const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]);
#else
    // vld2_dup is used to work around an aarch32 clang bug with vld1q_dup
const uint16x4x2_t vminmax = vld2_dup_u16(¶ms->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0]));
const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1]));
#endif
size_t output_decrement = output_stride * nc - 32 * sizeof(uint16_t);
while XNN_LIKELY(mc >= 32 * sizeof(uint16_t)) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567x0 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
float16x8_t vacc01234567x1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vacc89ABCDEFx0 = vacc01234567x0;
float16x8_t vacc89ABCDEFx1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vaccGHIJKLMNx0 = vacc01234567x0;
float16x8_t vaccGHIJKLMNx1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vaccOPQRSTUVx0 = vacc01234567x0;
float16x8_t vaccOPQRSTUVx1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const float16x8_t va01234567x0 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEFx0 = vreinterpretq_f16_u16(vld1q_u16(i + 8));
const float16x8_t vaGHIJKLMNx0 = vreinterpretq_f16_u16(vld1q_u16(i + 16));
const float16x8_t vaOPQRSTUVx0 = vreinterpretq_f16_u16(vld1q_u16(i + 24));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff0);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw0 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567x0 = vfmaq_f16(vacc01234567x0, va01234567x0, vw0);
vacc89ABCDEFx0 = vfmaq_f16(vacc89ABCDEFx0, va89ABCDEFx0, vw0);
vaccGHIJKLMNx0 = vfmaq_f16(vaccGHIJKLMNx0, vaGHIJKLMNx0, vw0);
vaccOPQRSTUVx0 = vfmaq_f16(vaccOPQRSTUVx0, vaOPQRSTUVx0, vw0);
const float16x8_t va01234567x1 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEFx1 = vreinterpretq_f16_u16(vld1q_u16(i + 8));
const float16x8_t vaGHIJKLMNx1 = vreinterpretq_f16_u16(vld1q_u16(i + 16));
const float16x8_t vaOPQRSTUVx1 = vreinterpretq_f16_u16(vld1q_u16(i + 24));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff1);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw1 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567x1 = vfmaq_f16(vacc01234567x1, va01234567x1, vw1);
vacc89ABCDEFx1 = vfmaq_f16(vacc89ABCDEFx1, va89ABCDEFx1, vw1);
vaccGHIJKLMNx1 = vfmaq_f16(vaccGHIJKLMNx1, vaGHIJKLMNx1, vw1);
vaccOPQRSTUVx1 = vfmaq_f16(vaccOPQRSTUVx1, vaOPQRSTUVx1, vw1);
}
float16x8_t vacc01234567 = vacc01234567x0;
float16x8_t vacc89ABCDEF = vacc89ABCDEFx0;
float16x8_t vaccGHIJKLMN = vaccGHIJKLMNx0;
float16x8_t vaccOPQRSTUV = vaccOPQRSTUVx0;
vacc01234567 = vaddq_f16(vacc01234567, vacc01234567x1);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vacc89ABCDEFx1);
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vaccGHIJKLMNx1);
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vaccOPQRSTUVx1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
const float16x8_t vaGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i + 16));
const float16x8_t vaOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i + 24));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vw);
vaccGHIJKLMN = vfmaq_f16(vaccGHIJKLMN, vaGHIJKLMN, vw);
vaccOPQRSTUV = vfmaq_f16(vaccOPQRSTUV, vaOPQRSTUV, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
float16x8_t voutGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax);
float16x8_t voutOPQRSTUV = vminq_f16(vaccOPQRSTUV, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
voutGHIJKLMN = vmaxq_f16(voutGHIJKLMN, vmin);
voutOPQRSTUV = vmaxq_f16(voutOPQRSTUV, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
vst1q_u16(o + 16, vreinterpretq_u16_f16(voutGHIJKLMN));
vst1q_u16(o + 24, vreinterpretq_u16_f16(voutOPQRSTUV));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 32;
mc -= 32 * sizeof(uint16_t);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(uint16_t);
if (mc & (16 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
float16x8_t vacc89ABCDEF = vacc01234567;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 16;
}
output_decrement += 8 * sizeof(uint16_t);
if (mc & (8 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 8;
}
output_decrement += 4 * sizeof(uint16_t);
if (mc & (4 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0123 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0123 = vreinterpret_f16_u16(vld1_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0123 = vfma_f16(vacc0123, va0123, vw);
} while (--nnz != 0);
}
float16x4_t vout0123 = vmin_f16(vacc0123, vget_low_f16(vmax));
vout0123 = vmax_f16(vout0123, vget_low_f16(vmin));
vst1_u16(o, vreinterpret_u16_f16(vout0123));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 4;
}
output_decrement += 2 * sizeof(uint16_t);
if (mc & (2 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc01 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va01 = vreinterpret_f16_u32(vld1_dup_u32((const void*) i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc01 = vfma_f16(vacc01, va01, vw);
} while (--nnz != 0);
}
float16x4_t vout01 = vmin_f16(vacc01, vget_low_f16(vmax));
vout01 = vmax_f16(vout01, vget_low_f16(vmin));
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vout01), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 2;
}
output_decrement += 1 * sizeof(uint16_t);
if (mc & (1 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0 = vreinterpret_f16_u16(vld1_dup_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0 = vfma_f16(vacc0, va0, vw);
} while (--nnz != 0);
}
float16x4_t vout0 = vmin_f16(vacc0, vget_low_f16(vmax));
vout0 = vmax_f16(vout0, vget_low_f16(vmin));
vst1_lane_u16(o, vreinterpret_u16_f16(vout0), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 1;
}
}
}
| 12,450 | 44.441606 | 97 | c |
XNNPACK | XNNPACK-master/src/f16-spmm/gen/f16-spmm-32x1-minmax-neonfp16arith.c |
// Auto-generated file. Do not edit!
// Template: src/f16-spmm/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f16_spmm_minmax_ukernel_32x1__neonfp16arith(
size_t mc,
size_t nc,
const void* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
void* output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(uint16_t) == 0);
assert(nc != 0);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
#if XNN_ARCH_ARM64
  const uint16x8x2_t vminmax = vld2q_dup_u16(&params->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]);
const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]);
#else
// vld2_dup is to work around aarch32 clang bug with vld1q_dup
  const uint16x4x2_t vminmax = vld2_dup_u16(&params->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0]));
const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1]));
#endif
size_t output_decrement = output_stride * nc - 32 * sizeof(uint16_t);
while XNN_LIKELY(mc >= 32 * sizeof(uint16_t)) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
float16x8_t vacc89ABCDEF = vacc01234567;
float16x8_t vaccGHIJKLMN = vacc01234567;
float16x8_t vaccOPQRSTUV = vacc01234567;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
const float16x8_t vaGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i + 16));
const float16x8_t vaOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i + 24));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vw);
vaccGHIJKLMN = vfmaq_f16(vaccGHIJKLMN, vaGHIJKLMN, vw);
vaccOPQRSTUV = vfmaq_f16(vaccOPQRSTUV, vaOPQRSTUV, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
float16x8_t voutGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax);
float16x8_t voutOPQRSTUV = vminq_f16(vaccOPQRSTUV, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
voutGHIJKLMN = vmaxq_f16(voutGHIJKLMN, vmin);
voutOPQRSTUV = vmaxq_f16(voutOPQRSTUV, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
vst1q_u16(o + 16, vreinterpretq_u16_f16(voutGHIJKLMN));
vst1q_u16(o + 24, vreinterpretq_u16_f16(voutOPQRSTUV));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 32;
mc -= 32 * sizeof(uint16_t);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 16 * sizeof(uint16_t);
if (mc & (16 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
float16x8_t vacc89ABCDEF = vacc01234567;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t va89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i + 8));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
float16x8_t vout89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vout89ABCDEF = vmaxq_f16(vout89ABCDEF, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
vst1q_u16(o + 8, vreinterpretq_u16_f16(vout89ABCDEF));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 16;
}
output_decrement += 8 * sizeof(uint16_t);
if (mc & (8 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 8;
}
output_decrement += 4 * sizeof(uint16_t);
if (mc & (4 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0123 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0123 = vreinterpret_f16_u16(vld1_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0123 = vfma_f16(vacc0123, va0123, vw);
} while (--nnz != 0);
}
float16x4_t vout0123 = vmin_f16(vacc0123, vget_low_f16(vmax));
vout0123 = vmax_f16(vout0123, vget_low_f16(vmin));
vst1_u16(o, vreinterpret_u16_f16(vout0123));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 4;
}
output_decrement += 2 * sizeof(uint16_t);
if (mc & (2 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc01 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va01 = vreinterpret_f16_u32(vld1_dup_u32((const void*) i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc01 = vfma_f16(vacc01, va01, vw);
} while (--nnz != 0);
}
float16x4_t vout01 = vmin_f16(vacc01, vget_low_f16(vmax));
vout01 = vmax_f16(vout01, vget_low_f16(vmin));
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vout01), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 2;
}
output_decrement += 1 * sizeof(uint16_t);
if (mc & (1 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0 = vreinterpret_f16_u16(vld1_dup_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0 = vfma_f16(vacc0, va0, vw);
} while (--nnz != 0);
}
float16x4_t vout0 = vmin_f16(vacc0, vget_low_f16(vmax));
vout0 = vmax_f16(vout0, vget_low_f16(vmin));
vst1_lane_u16(o, vreinterpret_u16_f16(vout0), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 1;
}
}
}
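
What follows is a minimal scalar sketch (plain float, a single input column) of the traversal the sparse kernels above perform. The operand semantics are inferred from the vector loops and should be read as an assumption rather than XNNPACK documentation: weights stores, per output channel, a bias followed by that channel's nonzero values; nidx_nnzmap holds the nonzero count per channel; and widx_dmap holds the delta applied to the input pointer after each nonzero (pre-scaled byte offsets in the real kernels, element deltas in this sketch).

#include <stddef.h>
#include <stdint.h>

// Scalar model of the sparse traversal, one input column, float for clarity.
// In this sketch widx_dmap holds element deltas; the real kernels store
// pre-scaled byte offsets and add them to a uint16_t pointer.
static void spmm_minmax_scalar_model(
    size_t nc,                    // number of output channels
    const float* input,           // one dense activation column
    const float* weights,         // per channel: bias, then its nonzero weights
    const int32_t* widx_dmap,     // per nonzero: delta to the next used input element
    const uint32_t* nidx_nnzmap,  // per channel: number of nonzero weights
    float* output,
    float out_min,
    float out_max)
{
  const float* i = input;   // never rewound between channels, as in the kernels
  const float* w = weights;
  for (size_t n = 0; n < nc; n++) {
    float acc = *w++;                  // channel bias
    uint32_t nnz = *nidx_nnzmap++;
    while (nnz != 0) {
      const float a = *i;              // load the current input element
      i += *widx_dmap++;               // hop to the input element of the next nonzero
      acc += a * (*w++);               // accumulate one nonzero weight
      nnz -= 1;
    }
    acc = acc > out_max ? out_max : acc;   // clamp to [out_min, out_max]
    acc = acc < out_min ? out_min : acc;
    output[n] = acc;                   // the kernels also stride by output_stride here
  }
}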
| 9,891 | 41.454936 | 97 | c |
| XNNPACK | XNNPACK-master/src/f16-spmm/gen/f16-spmm-8x1-minmax-neonfp16arith-pipelined.c |
// Auto-generated file. Do not edit!
// Template: src/f16-spmm/neonfp16arith-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f16_spmm_minmax_ukernel_8x1__neonfp16arith_pipelined(
size_t mc,
size_t nc,
const void* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
void* output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(uint16_t) == 0);
assert(nc != 0);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
#if XNN_ARCH_ARM64
  const uint16x8x2_t vminmax = vld2q_dup_u16(&params->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]);
const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]);
#else
// vld2_dup is to work around aarch32 clang bug with vld1q_dup
  const uint16x4x2_t vminmax = vld2_dup_u16(&params->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0]));
const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1]));
#endif
size_t output_decrement = output_stride * nc - 8 * sizeof(uint16_t);
while XNN_LIKELY(mc >= 8 * sizeof(uint16_t)) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
intptr_t diff = *dmap++;
float16x8_t vi01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vw;
vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc01234567 = vfmaq_f16(vacc01234567, vi01234567, vw);
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
xnn_prefetch_to_l1(i + 32);
diff = *dmap++;
vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vi01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 8;
mc -= 8 * sizeof(uint16_t);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(uint16_t);
if (mc & (4 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0123 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0123 = vreinterpret_f16_u16(vld1_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0123 = vfma_f16(vacc0123, va0123, vw);
} while (--nnz != 0);
}
float16x4_t vout0123 = vmin_f16(vacc0123, vget_low_f16(vmax));
vout0123 = vmax_f16(vout0123, vget_low_f16(vmin));
vst1_u16(o, vreinterpret_u16_f16(vout0123));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 4;
}
output_decrement += 2 * sizeof(uint16_t);
if (mc & (2 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc01 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va01 = vreinterpret_f16_u32(vld1_dup_u32((const void*) i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc01 = vfma_f16(vacc01, va01, vw);
} while (--nnz != 0);
}
float16x4_t vout01 = vmin_f16(vacc01, vget_low_f16(vmax));
vout01 = vmax_f16(vout01, vget_low_f16(vmin));
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vout01), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 2;
}
output_decrement += 1 * sizeof(uint16_t);
if (mc & (1 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0 = vreinterpret_f16_u16(vld1_dup_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0 = vfma_f16(vacc0, va0, vw);
} while (--nnz != 0);
}
float16x4_t vout0 = vmin_f16(vacc0, vget_low_f16(vmax));
vout0 = vmax_f16(vout0, vget_low_f16(vmin));
vst1_lane_u16(o, vreinterpret_u16_f16(vout0), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 1;
}
}
}
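
The pipelined variant above differs from the plain kernels mainly in scheduling: the weight, offset, and input row for the next nonzero are loaded before the current fused multiply-add is issued. Below is a small scalar sketch of that pattern; the names a, idx, and w are illustrative and not part of XNNPACK.

#include <stddef.h>

// Scalar sketch of software pipelining: the operands for nonzero k+1 are
// loaded before the multiply-add for nonzero k is performed.
static float pipelined_dot(const float* a, const size_t* idx,
                           const float* w, size_t nnz, float bias)
{
  float acc = bias;
  if (nnz == 0) {
    return acc;
  }
  float cur_a = a[idx[0]];   // hoisted loads, mirroring the loads above the do-loop
  float cur_w = w[0];
  for (size_t k = 1; k < nnz; k++) {
    const float next_a = a[idx[k]];   // issue the next loads early...
    const float next_w = w[k];
    acc += cur_a * cur_w;             // ...so they can overlap with this multiply-add
    cur_a = next_a;
    cur_w = next_w;
  }
  acc += cur_a * cur_w;               // drain the last pipelined pair
  return acc;
}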
| 6,234 | 37.726708 | 97 | c |
| XNNPACK | XNNPACK-master/src/f16-spmm/gen/f16-spmm-8x1-minmax-neonfp16arith-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-spmm/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f16_spmm_minmax_ukernel_8x1__neonfp16arith_x2(
size_t mc,
size_t nc,
const void* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
void* output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(uint16_t) == 0);
assert(nc != 0);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
#if XNN_ARCH_ARM64
  const uint16x8x2_t vminmax = vld2q_dup_u16(&params->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]);
const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]);
#else
// vld2_dup is to work around aarch32 clang bug with vld1q_dup
  const uint16x4x2_t vminmax = vld2_dup_u16(&params->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0]));
const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1]));
#endif
size_t output_decrement = output_stride * nc - 8 * sizeof(uint16_t);
while XNN_LIKELY(mc >= 8 * sizeof(uint16_t)) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567x0 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
float16x8_t vacc01234567x1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const float16x8_t va01234567x0 = vreinterpretq_f16_u16(vld1q_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff0);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw0 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567x0 = vfmaq_f16(vacc01234567x0, va01234567x0, vw0);
const float16x8_t va01234567x1 = vreinterpretq_f16_u16(vld1q_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff1);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw1 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567x1 = vfmaq_f16(vacc01234567x1, va01234567x1, vw1);
}
float16x8_t vacc01234567 = vacc01234567x0;
vacc01234567 = vaddq_f16(vacc01234567, vacc01234567x1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 8;
mc -= 8 * sizeof(uint16_t);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(uint16_t);
if (mc & (4 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0123 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0123 = vreinterpret_f16_u16(vld1_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0123 = vfma_f16(vacc0123, va0123, vw);
} while (--nnz != 0);
}
float16x4_t vout0123 = vmin_f16(vacc0123, vget_low_f16(vmax));
vout0123 = vmax_f16(vout0123, vget_low_f16(vmin));
vst1_u16(o, vreinterpret_u16_f16(vout0123));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 4;
}
output_decrement += 2 * sizeof(uint16_t);
if (mc & (2 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc01 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va01 = vreinterpret_f16_u32(vld1_dup_u32((const void*) i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc01 = vfma_f16(vacc01, va01, vw);
} while (--nnz != 0);
}
float16x4_t vout01 = vmin_f16(vacc01, vget_low_f16(vmax));
vout01 = vmax_f16(vout01, vget_low_f16(vmin));
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vout01), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 2;
}
output_decrement += 1 * sizeof(uint16_t);
if (mc & (1 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0 = vreinterpret_f16_u16(vld1_dup_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0 = vfma_f16(vacc0, va0, vw);
} while (--nnz != 0);
}
float16x4_t vout0 = vmin_f16(vacc0, vget_low_f16(vmax));
vout0 = vmax_f16(vout0, vget_low_f16(vmin));
vst1_lane_u16(o, vreinterpret_u16_f16(vout0), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 1;
}
}
}
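
The x2 variant above unrolls the nonzero loop by two and keeps two independent accumulators, the first seeded with the bias and the second with zero, which are summed before a short remainder loop handles an odd trailing nonzero. A scalar sketch of the same idea, with illustrative names, follows.

#include <stddef.h>

// Scalar sketch of the 2-way unrolling above: even and odd nonzeros feed two
// independent accumulators that are combined before a short remainder loop.
static float unrolled_dot(const float* a, const size_t* idx,
                          const float* w, size_t nnz, float bias)
{
  float acc0 = bias;   // first accumulator starts from the bias, as in the kernel
  float acc1 = 0.0f;   // second accumulator starts from zero
  size_t k = 0;
  for (; k + 2 <= nnz; k += 2) {
    acc0 += a[idx[k]] * w[k];            // two independent dependency chains
    acc1 += a[idx[k + 1]] * w[k + 1];
  }
  float acc = acc0 + acc1;               // combine before the odd tail, if any
  for (; k < nnz; k++) {
    acc += a[idx[k]] * w[k];
  }
  return acc;
}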
| 7,155 | 39.429379 | 97 | c |
| XNNPACK | XNNPACK-master/src/f16-spmm/gen/f16-spmm-8x1-minmax-neonfp16arith.c |
// Auto-generated file. Do not edit!
// Template: src/f16-spmm/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f16_spmm_minmax_ukernel_8x1__neonfp16arith(
size_t mc,
size_t nc,
const void* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
void* output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(uint16_t) == 0);
assert(nc != 0);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
#if XNN_ARCH_ARM64
  const uint16x8x2_t vminmax = vld2q_dup_u16(&params->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]);
const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]);
#else
// vld2_dup is to work around aarch32 clang bug with vld1q_dup
  const uint16x4x2_t vminmax = vld2_dup_u16(&params->fp16arith.min);
const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0]));
const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1]));
#endif
size_t output_decrement = output_stride * nc - 8 * sizeof(uint16_t);
while XNN_LIKELY(mc >= 8 * sizeof(uint16_t)) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
xnn_prefetch_to_l1(i + 32);
const float16x8_t vw = vreinterpretq_f16_u16(vld1q_dup_u16(w)); w += 1;
xnn_prefetch_to_l1(w + 64);
vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vw);
} while (--nnz != 0);
}
float16x8_t vout01234567 = vminq_f16(vacc01234567, vmax);
vout01234567 = vmaxq_f16(vout01234567, vmin);
vst1q_u16(o, vreinterpretq_u16_f16(vout01234567));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 8;
mc -= 8 * sizeof(uint16_t);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(uint16_t);
if (mc & (4 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0123 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0123 = vreinterpret_f16_u16(vld1_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0123 = vfma_f16(vacc0123, va0123, vw);
} while (--nnz != 0);
}
float16x4_t vout0123 = vmin_f16(vacc0123, vget_low_f16(vmax));
vout0123 = vmax_f16(vout0123, vget_low_f16(vmin));
vst1_u16(o, vreinterpret_u16_f16(vout0123));
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 4;
}
output_decrement += 2 * sizeof(uint16_t);
if (mc & (2 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc01 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va01 = vreinterpret_f16_u32(vld1_dup_u32((const void*) i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc01 = vfma_f16(vacc01, va01, vw);
} while (--nnz != 0);
}
float16x4_t vout01 = vmin_f16(vacc01, vget_low_f16(vmax));
vout01 = vmax_f16(vout01, vget_low_f16(vmin));
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vout01), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 2;
}
output_decrement += 1 * sizeof(uint16_t);
if (mc & (1 * sizeof(uint16_t))) {
const uint16_t* w = (const uint16_t*) weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
float16x4_t vacc0 = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float16x4_t va0 = vreinterpret_f16_u16(vld1_dup_u16(i));
i = (const uint16_t*) ((uintptr_t) i + (uintptr_t) diff);
const float16x4_t vw = vreinterpret_f16_u16(vld1_dup_u16(w)); w += 1;
vacc0 = vfma_f16(vacc0, va0, vw);
} while (--nnz != 0);
}
float16x4_t vout0 = vmin_f16(vacc0, vget_low_f16(vmax));
vout0 = vmax_f16(vout0, vget_low_f16(vmin));
vst1_lane_u16(o, vreinterpret_u16_f16(vout0), 0);
o = (uint16_t*) ((uintptr_t) o + output_stride);
} while (--n != 0);
o = (uint16_t*) ((uintptr_t) o - output_decrement);
i += 1;
}
}
}
| 6,085 | 37.764331 | 97 | c |
| XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vadd-minmax-f16c-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vadd_minmax_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
const __m256 vb456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (b + 8)));
a += 16;
b += 16;
__m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(va01234567, vb01234567), _MM_FROUND_TO_NEAREST_INT));
__m256 vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(va456789AB, vb456789AB), _MM_FROUND_TO_NEAREST_INT));
vy01234567 = _mm256_max_ps(vy01234567, vy_min);
vy456789AB = _mm256_max_ps(vy456789AB, vy_min);
vy01234567 = _mm256_min_ps(vy01234567, vy_max);
vy456789AB = _mm256_min_ps(vy456789AB, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy456789AB, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
a += 8;
b += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
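
The f16c kernels above widen the inputs to single precision, add, round the sum back to half precision, and only then clamp the re-widened value, so the clamp is applied to the number that will actually be stored. Below is a one-element sketch of that ordering using the scalar F16C conversion intrinsics; it is an illustration, not part of XNNPACK.

#include <immintrin.h>

// One-element model of the rounding order used by the f16c kernels above:
// widen to f32, add, round to f16 precision, then clamp the re-widened result.
static unsigned short f16_add_minmax_model(unsigned short a, unsigned short b,
                                           float y_min, float y_max)
{
  const float sum = _cvtsh_ss(a) + _cvtsh_ss(b);                    // add in f32
  float y = _cvtsh_ss(_cvtss_sh(sum, _MM_FROUND_TO_NEAREST_INT));   // round to f16 first
  y = y < y_min ? y_min : y;                                        // then clamp, as above
  y = y > y_max ? y_max : y;
  return _cvtss_sh(y, _MM_FROUND_TO_NEAREST_INT);                   // store as f16 bits
}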
| 3,503 | 34.04 | 123 | c |
| XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vadd-minmax-f16c-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vadd_minmax_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
a += 8;
b += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,385 | 29.589744 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vadd-minmax-fp16arith-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vadd_minmax_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
do {
const float16_t va = *a++;
const float16_t vb = *b++;
float16_t vacc = vaddh_f16(va, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,379 | 26.058824 | 75 | c |
| XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vadd-minmax-fp16arith-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vadd_minmax_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
memcpy(&vy_min, ¶ms->fp16arith.min, sizeof(vy_min));
memcpy(&vy_max, ¶ms->fp16arith.max, sizeof(vy_max));
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
float16_t vacc0 = vaddh_f16(va0, vb0);
float16_t vacc1 = vaddh_f16(va1, vb1);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
*o++ = vacc0;
*o++ = vacc1;
}
if XNN_UNLIKELY(batch != 0) {
const float16_t va = *a;
const float16_t vb = *b;
float16_t vacc = vaddh_f16(va, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o = vacc;
}
}
| 1,841 | 25.314286 | 75 | c |
| XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vadd-minmax-fp16arith-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vadd_minmax_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
memcpy(&vy_min, ¶ms->fp16arith.min, sizeof(vy_min));
memcpy(&vy_max, ¶ms->fp16arith.max, sizeof(vy_max));
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t va2 = *a++;
const float16_t va3 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
const float16_t vb2 = *b++;
const float16_t vb3 = *b++;
float16_t vacc0 = vaddh_f16(va0, vb0);
float16_t vacc1 = vaddh_f16(va1, vb1);
float16_t vacc2 = vaddh_f16(va2, vb2);
float16_t vacc3 = vaddh_f16(va3, vb3);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc2 = vmaxnmh_f16(vacc2, vy_min);
vacc3 = vmaxnmh_f16(vacc3, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
vacc2 = vminnmh_f16(vacc2, vy_max);
vacc3 = vminnmh_f16(vacc3, vy_max);
*o++ = vacc0;
*o++ = vacc1;
*o++ = vacc2;
*o++ = vacc3;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float16_t va = *a++;
const float16_t vb = *b++;
float16_t vacc = vaddh_f16(va, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 2,338 | 26.517647 | 75 | c |
| XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vadd-minmax-neonfp16arith-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vadd_minmax_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
const float16x8_t va456789AB = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb456789AB = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vaddq_f16(va01234567, vb01234567);
float16x8_t vy456789AB = vaddq_f16(va456789AB, vb456789AB);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy456789AB = vmaxq_f16(vy456789AB, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vy456789AB = vminq_f16(vy456789AB, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy456789AB)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vaddq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vaddq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
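
When fewer than eight elements remain, the NEON kernels above still compute a full eight-lane result (the remainder path loads whole vectors, which is consistent with the XNN_OOB_READS annotation) and then store 4, 2, and finally 1 lanes according to the bits of the leftover element count. A scalar sketch of that store decomposition, with illustrative names, follows.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Scalar sketch of the remainder stores above: the leftover element count
// (1..7) is decomposed into its binary bits, and 4, 2, then 1 elements are
// copied out of an already-computed 8-element result y.
static void store_tail(uint16_t* o, const uint16_t y[8], size_t remaining)
{
  size_t pos = 0;
  if (remaining & 4) {
    memcpy(o, &y[pos], 4 * sizeof(uint16_t));  // lanes 0-3
    o += 4;
    pos += 4;
  }
  if (remaining & 2) {
    memcpy(o, &y[pos], 2 * sizeof(uint16_t));  // the next two lanes
    o += 2;
    pos += 2;
  }
  if (remaining & 1) {
    *o = y[pos];                               // the final lane
  }
}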
| 3,229 | 34.888889 | 90 | c |
| XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vadd-minmax-neonfp16arith-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vadd_minmax_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vaddq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vaddq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,382 | 32.56338 | 90 | c |
| XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vaddc-minmax-f16c-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vaddc_minmax_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
a += 16;
__m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(va01234567, vb), _MM_FROUND_TO_NEAREST_INT));
__m256 vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(va456789AB, vb), _MM_FROUND_TO_NEAREST_INT));
vy01234567 = _mm256_max_ps(vy01234567, vy_min);
vy456789AB = _mm256_max_ps(vy456789AB, vy_min);
vy01234567 = _mm256_min_ps(vy01234567, vy_max);
vy456789AB = _mm256_min_ps(vy456789AB, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy456789AB, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,203 | 32.726316 | 115 | c |
| XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vaddc-minmax-f16c-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vaddc_minmax_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,288 | 29.118421 | 99 | c |
| XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vaddc-minmax-fp16arith-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vaddc_minmax_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
do {
float16_t vacc = *a++;
vacc = vaddh_f16(vacc, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,391 | 25.769231 | 75 | c |
| XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vaddc-minmax-fp16arith-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vaddc_minmax_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
a += 2;
vacc0 = vaddh_f16(vacc0, vb);
vacc1 = vaddh_f16(vacc1, vb);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
o[0] = vacc0;
o[1] = vacc1;
o += 2;
}
if XNN_UNLIKELY(batch != 0) {
float16_t vacc = *a;
vacc = vaddh_f16(vacc, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o = vacc;
}
}
| 1,788 | 24.557143 | 75 | c |
| XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vaddc-minmax-fp16arith-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vaddc_minmax_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
memcpy(&vy_min, ¶ms->fp16arith.min, sizeof(vy_min));
memcpy(&vy_max, ¶ms->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
float16_t vacc2 = a[2];
float16_t vacc3 = a[3];
a += 4;
vacc0 = vaddh_f16(vacc0, vb);
vacc1 = vaddh_f16(vacc1, vb);
vacc2 = vaddh_f16(vacc2, vb);
vacc3 = vaddh_f16(vacc3, vb);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc2 = vmaxnmh_f16(vacc2, vy_min);
vacc3 = vmaxnmh_f16(vacc3, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
vacc2 = vminnmh_f16(vacc2, vy_max);
vacc3 = vminnmh_f16(vacc3, vy_max);
o[0] = vacc0;
o[1] = vacc1;
o[2] = vacc2;
o[3] = vacc3;
o += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float16_t vacc = *a++;
vacc = vaddh_f16(vacc, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 2,191 | 25.409639 | 75 | c |
| XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vaddc-minmax-neonfp16arith-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vaddc_minmax_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t va456789AB = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vaddq_f16(va01234567, vb);
float16x8_t vy456789AB = vaddq_f16(va456789AB, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy456789AB = vmaxq_f16(vy456789AB, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vy456789AB = vminq_f16(vy456789AB, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy456789AB)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vaddq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vaddq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,953 | 32.954023 | 90 | c |
| XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vaddc-minmax-neonfp16arith-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vaddc_minmax_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vaddq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vaddq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,282 | 31.614286 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vdiv-minmax-aarch64-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdiv_minmax_ukernel__aarch64_neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
    const float16x8_t va89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
    const float16x8_t vb89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
    float16x8_t vy01234567 = vdivq_f16(va01234567, vb01234567);
    float16x8_t vy89ABCDEF = vdivq_f16(va89ABCDEF, vb89ABCDEF);
    vy01234567 = vmaxq_f16(vy01234567, vy_min);
    vy89ABCDEF = vmaxq_f16(vy89ABCDEF, vy_min);
    vy01234567 = vminq_f16(vy01234567, vy_max);
    vy89ABCDEF = vminq_f16(vy89ABCDEF, vy_max);
    vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy89ABCDEF)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vdivq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vdivq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 3,237 | 34.977778 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vdiv-minmax-aarch64-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdiv_minmax_ukernel__aarch64_neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vdivq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vdivq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,390 | 32.676056 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vdiv-minmax-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdiv_minmax_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
    const __m256 va89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
    const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (b + 8)));
a += 16;
b += 16;
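    // Divide in single precision, then round-trip through FP16 (cvtps_ph / cvtph_ps) so the
    // intermediate result is rounded the same way native half-precision arithmetic would round it.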
__m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va01234567, vb01234567), _MM_FROUND_TO_NEAREST_INT));
    __m256 vy89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va89ABCDEF, vb89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
    vy01234567 = _mm256_max_ps(vy01234567, vy_min);
    vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
    vy01234567 = _mm256_min_ps(vy01234567, vy_max);
    vy89ABCDEF = _mm256_min_ps(vy89ABCDEF, vy_max);
    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
    _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
a += 8;
b += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,503 | 34.04 | 123 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vdiv-minmax-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdiv_minmax_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
a += 8;
b += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
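    // vh holds up to 8 packed FP16 results; the low 4, then 2, then 1 halfwords are stored
    // according to the bits of the remaining batch size.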
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,385 | 29.589744 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vdiv-minmax-fp16arith-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdiv_minmax_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
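  // The clamp bounds are stored as raw FP16 bit patterns; memcpy reinterprets them as
  // float16_t values without violating strict-aliasing rules.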
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
do {
const float16_t va = *a++;
const float16_t vb = *b++;
float16_t vacc = vdivh_f16(va, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,379 | 26.058824 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vdiv-minmax-fp16arith-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdiv_minmax_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
float16_t vacc0 = vdivh_f16(va0, vb0);
float16_t vacc1 = vdivh_f16(va1, vb1);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
*o++ = vacc0;
*o++ = vacc1;
}
if XNN_UNLIKELY(batch != 0) {
const float16_t va = *a;
const float16_t vb = *b;
float16_t vacc = vdivh_f16(va, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o = vacc;
}
}
| 1,841 | 25.314286 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vdiv-minmax-fp16arith-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdiv_minmax_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t va2 = *a++;
const float16_t va3 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
const float16_t vb2 = *b++;
const float16_t vb3 = *b++;
float16_t vacc0 = vdivh_f16(va0, vb0);
float16_t vacc1 = vdivh_f16(va1, vb1);
float16_t vacc2 = vdivh_f16(va2, vb2);
float16_t vacc3 = vdivh_f16(va3, vb3);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc2 = vmaxnmh_f16(vacc2, vy_min);
vacc3 = vmaxnmh_f16(vacc3, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
vacc2 = vminnmh_f16(vacc2, vy_max);
vacc3 = vminnmh_f16(vacc3, vy_max);
*o++ = vacc0;
*o++ = vacc1;
*o++ = vacc2;
*o++ = vacc3;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float16_t va = *a++;
const float16_t vb = *b++;
float16_t vacc = vdivh_f16(va, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 2,338 | 26.517647 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vdivc-minmax-aarch64-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdivc_minmax_ukernel__aarch64_neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
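  // input_b points to a single FP16 scalar; vld1q_dup_u16 broadcasts it across all 8 lanes
  // once, outside the batch loops.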
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
    const float16x8_t va89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
    float16x8_t vy01234567 = vdivq_f16(va01234567, vb);
    float16x8_t vy89ABCDEF = vdivq_f16(va89ABCDEF, vb);
    vy01234567 = vmaxq_f16(vy01234567, vy_min);
    vy89ABCDEF = vmaxq_f16(vy89ABCDEF, vy_min);
    vy01234567 = vminq_f16(vy01234567, vy_max);
    vy89ABCDEF = vminq_f16(vy89ABCDEF, vy_max);
    vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy89ABCDEF)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vdivq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vdivq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,961 | 33.045977 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vdivc-minmax-aarch64-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdivc_minmax_ukernel__aarch64_neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vdivq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vdivq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,290 | 31.728571 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vdivc-minmax-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdivc_minmax_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
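  // input_b points to a single FP16 scalar; it is converted to FP32 and broadcast to all
  // 8 lanes once, outside the batch loops.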
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
    const __m256 va89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
    a += 16;
    __m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va01234567, vb), _MM_FROUND_TO_NEAREST_INT));
    __m256 vy89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va89ABCDEF, vb), _MM_FROUND_TO_NEAREST_INT));
    vy01234567 = _mm256_max_ps(vy01234567, vy_min);
    vy89ABCDEF = _mm256_max_ps(vy89ABCDEF, vy_min);
    vy01234567 = _mm256_min_ps(vy01234567, vy_max);
    vy89ABCDEF = _mm256_min_ps(vy89ABCDEF, vy_max);
    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
    _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,203 | 32.726316 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vdivc-minmax-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdivc_minmax_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,288 | 29.118421 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vdivc-minmax-fp16arith-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdivc_minmax_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
do {
float16_t vacc = *a++;
vacc = vdivh_f16(vacc, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,391 | 25.769231 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vdivc-minmax-fp16arith-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdivc_minmax_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
a += 2;
vacc0 = vdivh_f16(vacc0, vb);
vacc1 = vdivh_f16(vacc1, vb);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
o[0] = vacc0;
o[1] = vacc1;
o += 2;
}
if XNN_UNLIKELY(batch != 0) {
float16_t vacc = *a;
vacc = vdivh_f16(vacc, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o = vacc;
}
}
| 1,788 | 24.557143 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vdivc-minmax-fp16arith-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdivc_minmax_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
float16_t vacc2 = a[2];
float16_t vacc3 = a[3];
a += 4;
vacc0 = vdivh_f16(vacc0, vb);
vacc1 = vdivh_f16(vacc1, vb);
vacc2 = vdivh_f16(vacc2, vb);
vacc3 = vdivh_f16(vacc3, vb);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc2 = vmaxnmh_f16(vacc2, vy_min);
vacc3 = vmaxnmh_f16(vacc3, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
vacc2 = vminnmh_f16(vacc2, vy_max);
vacc3 = vminnmh_f16(vacc3, vy_max);
o[0] = vacc0;
o[1] = vacc1;
o[2] = vacc2;
o[3] = vacc3;
o += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float16_t vacc = *a++;
vacc = vdivh_f16(vacc, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 2,191 | 25.409639 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vmax-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmax_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
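  // vmax needs no output clamping, so the xnn_f16_default_params argument is not used.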
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
    const __m256 va89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
    const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (b + 8)));
    a += 16;
    b += 16;
    __m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_max_ps(va01234567, vb01234567), _MM_FROUND_TO_NEAREST_INT));
    __m256 vy89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_max_ps(va89ABCDEF, vb89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
    _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
a += 8;
b += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_max_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_max_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,030 | 33.05618 | 123 |
c
|