repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmax-f16c-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmax_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
a += 8;
b += 8;
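    // F16C provides only fp16<->fp32 conversion, not fp16 arithmetic: the max is computed in
    // single precision and immediately rounded back to half precision (cvtps_ph/cvtph_ps).
    // For max the extra rounding step does not change the value; the shared vop-f16c template
    // emits the same round-trip for every binary op (see the vmul kernels further down).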
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_max_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
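  // Tail of 1-7 elements: a full 8-element vector is reloaded (the XNN_OOB_READS annotation on
  // the function marks the out-of-bounds read as intentional), the result is computed as usual,
  // and 4/2/1 lanes are stored according to the bits of the remaining byte count.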
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_max_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
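A minimal calling sketch for the microkernel above (an editor's addition, not part of the generated file). It assumes compilation inside the XNNPACK source tree with F16C/AVX enabled so that `xnnpack/vbinary.h` supplies the prototype; operands are raw IEEE half-precision bit patterns held in `uint16_t`, and `batch` is a byte count. Since the kernel body never dereferences `params`, the struct is passed only to satisfy the `XNN_MIN_ELEMENTS(1)` contract.

```c
#include <stdint.h>
#include <stdio.h>

#include <xnnpack/vbinary.h>  // assumed to declare xnn_f16_vmax_ukernel__f16c_x8

int main(void) {
  // fp16 bit patterns: 1.0h = 0x3C00, 2.0h = 0x4000, 3.0h = 0x4200, 4.0h = 0x4400
  const uint16_t a[8] = {0x3C00, 0x4400, 0x3C00, 0x4400, 0x3C00, 0x4400, 0x3C00, 0x4400};
  const uint16_t b[8] = {0x4000, 0x4200, 0x4000, 0x4200, 0x4000, 0x4200, 0x4000, 0x4200};
  uint16_t y[8];
  union xnn_f16_default_params params;  // never read by this kernel (see body above)
  xnn_f16_vmax_ukernel__f16c_x8(8 * sizeof(uint16_t), a, b, y, &params);
  // Elementwise max: expect 0x4000 (2.0h) then 0x4400 (4.0h), repeating.
  printf("y[0]=0x%04X y[1]=0x%04X\n", (unsigned) y[0], (unsigned) y[1]);
  return 0;
}
```

Any batch that is a non-zero multiple of sizeof(uint16_t) is accepted; remainders of 1-7 elements fall through to the tail handling noted above.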
| 2,121 | 28.472222 | 99 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmax-fp16arith-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmax_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
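  // One fp16 element per iteration. vmaxnmh_f16 follows IEEE 754-2008 maxNum semantics:
  // if exactly one operand is a quiet NaN, the other operand is returned.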
do {
const float16_t va = *a++;
const float16_t vb = *b++;
float16_t vacc = vmaxnmh_f16(va, vb);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,133 | 24.2 | 76 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmax-fp16arith-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmax_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
float16_t vacc0 = vmaxnmh_f16(va0, vb0);
float16_t vacc1 = vmaxnmh_f16(va1, vb1);
*o++ = vacc0;
*o++ = vacc1;
}
if XNN_UNLIKELY(batch != 0) {
const float16_t va = *a;
const float16_t vb = *b;
float16_t vacc = vmaxnmh_f16(va, vb);
*o = vacc;
}
}
| 1,438 | 23.389831 | 76 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmax-fp16arith-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmax_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t va2 = *a++;
const float16_t va3 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
const float16_t vb2 = *b++;
const float16_t vb3 = *b++;
float16_t vacc0 = vmaxnmh_f16(va0, vb0);
float16_t vacc1 = vmaxnmh_f16(va1, vb1);
float16_t vacc2 = vmaxnmh_f16(va2, vb2);
float16_t vacc3 = vmaxnmh_f16(va3, vb3);
*o++ = vacc0;
*o++ = vacc1;
*o++ = vacc2;
*o++ = vacc3;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float16_t va = *a++;
const float16_t vb = *b++;
float16_t vacc = vmaxnmh_f16(va, vb);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 1,775 | 24.371429 | 76 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmax-neonfp16arith-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmax_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
const float16x8_t va456789AB = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb456789AB = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vmaxq_f16(va01234567, vb01234567);
float16x8_t vy456789AB = vmaxq_f16(va456789AB, vb456789AB);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy456789AB)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vmaxq_f16(va01234567, vb01234567);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
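    // Tail of 1-7 elements: full 8-element vectors are loaded (permitted by XNN_OOB_READS),
    // the max is computed, then 4, 2, and 1 lanes are stored as dictated by the remaining byte
    // count, shifting fresh lanes into place with vget_high_f16/vext_f16 between stores.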
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vmaxq_f16(va01234567, vb01234567);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,656 | 32.632911 | 90 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmax-neonfp16arith-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmax_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vmaxq_f16(va01234567, vb01234567);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vmaxq_f16(va01234567, vb01234567);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,002 | 29.815385 | 90 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmaxc-f16c-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmaxc_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
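  // The 'vmaxc' variant takes a single fp16 value as its second operand: *b is broadcast to all
  // eight lanes once, outside the loops, and reused for every element of input_a.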
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
a += 16;
__m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_max_ps(va01234567, vb), _MM_FROUND_TO_NEAREST_INT));
__m256 vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_max_ps(va456789AB, vb), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy456789AB, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_max_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_max_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,730 | 31.511905 | 115 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmaxc-f16c-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmaxc_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_max_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_max_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,024 | 27.928571 | 99 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmaxc-fp16arith-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmaxc_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
const float16_t vb = *b;
do {
float16_t vacc = *a++;
vacc = vmaxnmh_f16(vacc, vb);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,145 | 23.913043 | 76 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmaxc-fp16arith-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmaxc_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
const float16_t vb = *b;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
a += 2;
vacc0 = vmaxnmh_f16(vacc0, vb);
vacc1 = vmaxnmh_f16(vacc1, vb);
o[0] = vacc0;
o[1] = vacc1;
o += 2;
}
if XNN_UNLIKELY(batch != 0) {
float16_t vacc = *a;
vacc = vmaxnmh_f16(vacc, vb);
*o = vacc;
}
}
| 1,385 | 22.491525 | 76 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmaxc-fp16arith-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmaxc_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
const float16_t vb = *b;
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
float16_t vacc2 = a[2];
float16_t vacc3 = a[3];
a += 4;
vacc0 = vmaxnmh_f16(vacc0, vb);
vacc1 = vmaxnmh_f16(vacc1, vb);
vacc2 = vmaxnmh_f16(vacc2, vb);
vacc3 = vmaxnmh_f16(vacc3, vb);
o[0] = vacc0;
o[1] = vacc1;
o[2] = vacc2;
o[3] = vacc3;
o += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float16_t vacc = *a++;
vacc = vmaxnmh_f16(vacc, vb);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 1,628 | 22.955882 | 76 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmaxc-neonfp16arith-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmaxc_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t va456789AB = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vmaxq_f16(va01234567, vb);
float16x8_t vy456789AB = vmaxq_f16(va456789AB, vb);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy456789AB)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vmaxq_f16(va01234567, vb);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vmaxq_f16(va01234567, vb);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,380 | 30.328947 | 90 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmaxc-neonfp16arith-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmaxc_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vmaxq_f16(va01234567, vb);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vmaxq_f16(va01234567, vb);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 1,902 | 28.734375 | 90 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmin-f16c-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmin_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
const __m256 vb456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (b + 8)));
a += 16;
b += 16;
__m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_min_ps(va01234567, vb01234567), _MM_FROUND_TO_NEAREST_INT));
__m256 vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_min_ps(va456789AB, vb456789AB), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy456789AB, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
a += 8;
b += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_min_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_min_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,030 | 33.05618 | 123 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmin-f16c-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmin_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
a += 8;
b += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_min_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_min_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,121 | 28.472222 | 99 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmin-fp16arith-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmin_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
do {
const float16_t va = *a++;
const float16_t vb = *b++;
float16_t vacc = vminnmh_f16(va, vb);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,133 | 24.2 | 76 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmin-fp16arith-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmin_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
float16_t vacc0 = vminnmh_f16(va0, vb0);
float16_t vacc1 = vminnmh_f16(va1, vb1);
*o++ = vacc0;
*o++ = vacc1;
}
if XNN_UNLIKELY(batch != 0) {
const float16_t va = *a;
const float16_t vb = *b;
float16_t vacc = vminnmh_f16(va, vb);
*o = vacc;
}
}
| 1,438 | 23.389831 | 76 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmin-fp16arith-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmin_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t va2 = *a++;
const float16_t va3 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
const float16_t vb2 = *b++;
const float16_t vb3 = *b++;
float16_t vacc0 = vminnmh_f16(va0, vb0);
float16_t vacc1 = vminnmh_f16(va1, vb1);
float16_t vacc2 = vminnmh_f16(va2, vb2);
float16_t vacc3 = vminnmh_f16(va3, vb3);
*o++ = vacc0;
*o++ = vacc1;
*o++ = vacc2;
*o++ = vacc3;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float16_t va = *a++;
const float16_t vb = *b++;
float16_t vacc = vminnmh_f16(va, vb);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 1,775 | 24.371429 | 76 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmin-neonfp16arith-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmin_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
const float16x8_t va456789AB = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb456789AB = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vminq_f16(va01234567, vb01234567);
float16x8_t vy456789AB = vminq_f16(va456789AB, vb456789AB);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy456789AB)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vminq_f16(va01234567, vb01234567);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vminq_f16(va01234567, vb01234567);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,656 | 32.632911 | 90 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmin-neonfp16arith-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmin_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vminq_f16(va01234567, vb01234567);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vminq_f16(va01234567, vb01234567);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,002 | 29.815385 | 90 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vminc-f16c-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vminc_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
a += 16;
__m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_min_ps(va01234567, vb), _MM_FROUND_TO_NEAREST_INT));
__m256 vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_min_ps(va456789AB, vb), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy456789AB, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_min_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_min_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,730 | 31.511905 | 115 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vminc-f16c-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vminc_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_min_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_min_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,024 | 27.928571 | 99 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vminc-fp16arith-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vminc_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
const float16_t vb = *b;
do {
float16_t vacc = *a++;
vacc = vminnmh_f16(vacc, vb);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,145 | 23.913043 | 76 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vminc-fp16arith-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vminc_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
const float16_t vb = *b;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
a += 2;
vacc0 = vminnmh_f16(vacc0, vb);
vacc1 = vminnmh_f16(vacc1, vb);
o[0] = vacc0;
o[1] = vacc1;
o += 2;
}
if XNN_UNLIKELY(batch != 0) {
float16_t vacc = *a;
vacc = vminnmh_f16(vacc, vb);
*o = vacc;
}
}
| 1,385 | 22.491525 | 76 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vminc-fp16arith-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vminc_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
const float16_t vb = *b;
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
float16_t vacc2 = a[2];
float16_t vacc3 = a[3];
a += 4;
vacc0 = vminnmh_f16(vacc0, vb);
vacc1 = vminnmh_f16(vacc1, vb);
vacc2 = vminnmh_f16(vacc2, vb);
vacc3 = vminnmh_f16(vacc3, vb);
o[0] = vacc0;
o[1] = vacc1;
o[2] = vacc2;
o[3] = vacc3;
o += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float16_t vacc = *a++;
vacc = vminnmh_f16(vacc, vb);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 1,628 | 22.955882 | 76 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vminc-neonfp16arith-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vminc_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t va456789AB = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vminq_f16(va01234567, vb);
float16x8_t vy456789AB = vminq_f16(va456789AB, vb);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy456789AB)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vminq_f16(va01234567, vb);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vminq_f16(va01234567, vb);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,380 | 30.328947 | 90 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vminc-neonfp16arith-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vminc_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vminq_f16(va01234567, vb);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vminq_f16(va01234567, vb);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 1,902 | 28.734375 | 90 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmul-minmax-f16c-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmul_minmax_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
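  // The clamp bounds come from the params struct as 8-wide fp32 vectors; each product is first
  // rounded to fp16 precision, then clamped to [min, max] before the final fp16 store.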
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
const __m256 vb456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (b + 8)));
a += 16;
b += 16;
__m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(va01234567, vb01234567), _MM_FROUND_TO_NEAREST_INT));
__m256 vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(va456789AB, vb456789AB), _MM_FROUND_TO_NEAREST_INT));
vy01234567 = _mm256_max_ps(vy01234567, vy_min);
vy456789AB = _mm256_max_ps(vy456789AB, vy_min);
vy01234567 = _mm256_min_ps(vy01234567, vy_max);
vy456789AB = _mm256_min_ps(vy456789AB, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy456789AB, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
a += 8;
b += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,503 | 34.04 | 123 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmul-minmax-f16c-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmul_minmax_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
a += 8;
b += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,385 | 29.589744 | 99 | c |
XNNPACK | XNNPACK-master/src/f16-vbinary/gen/f16-vmul-minmax-fp16arith-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmul_minmax_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
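  // The clamping bounds are stored as raw 16-bit values; memcpy reinterprets them as float16_t without an aliasing-unsafe cast.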
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
do {
const float16_t va = *a++;
const float16_t vb = *b++;
float16_t vacc = vmulh_f16(va, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,379 | 26.058824 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vmul-minmax-fp16arith-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmul_minmax_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
float16_t vacc0 = vmulh_f16(va0, vb0);
float16_t vacc1 = vmulh_f16(va1, vb1);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
*o++ = vacc0;
*o++ = vacc1;
}
if XNN_UNLIKELY(batch != 0) {
const float16_t va = *a;
const float16_t vb = *b;
float16_t vacc = vmulh_f16(va, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o = vacc;
}
}
| 1,841 | 25.314286 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vmul-minmax-fp16arith-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmul_minmax_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
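  // Main loop is unrolled by 4 with independent accumulators so the compiler can schedule the multiplies and clamps in parallel.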
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t va2 = *a++;
const float16_t va3 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
const float16_t vb2 = *b++;
const float16_t vb3 = *b++;
float16_t vacc0 = vmulh_f16(va0, vb0);
float16_t vacc1 = vmulh_f16(va1, vb1);
float16_t vacc2 = vmulh_f16(va2, vb2);
float16_t vacc3 = vmulh_f16(va3, vb3);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc2 = vmaxnmh_f16(vacc2, vy_min);
vacc3 = vmaxnmh_f16(vacc3, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
vacc2 = vminnmh_f16(vacc2, vy_max);
vacc3 = vminnmh_f16(vacc3, vy_max);
*o++ = vacc0;
*o++ = vacc1;
*o++ = vacc2;
*o++ = vacc3;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float16_t va = *a++;
const float16_t vb = *b++;
float16_t vacc = vmulh_f16(va, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 2,338 | 26.517647 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vmul-minmax-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmul_minmax_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
const float16x8_t va456789AB = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb456789AB = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vmulq_f16(va01234567, vb01234567);
float16x8_t vy456789AB = vmulq_f16(va456789AB, vb456789AB);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy456789AB = vmaxq_f16(vy456789AB, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vy456789AB = vminq_f16(vy456789AB, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy456789AB)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vmulq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vmulq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
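    // Tail of 1-7 elements: start from the low 64-bit half and switch to the high half once 4 lanes have been stored.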
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 3,229 | 34.888889 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vmul-minmax-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmul_minmax_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vmulq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vmulq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,382 | 32.56338 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vmulc-minmax-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmulc_minmax_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
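  // Broadcast the single fp16 operand b to all eight lanes once; it is reused for the entire batch.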
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
a += 16;
__m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(va01234567, vb), _MM_FROUND_TO_NEAREST_INT));
__m256 vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(va456789AB, vb), _MM_FROUND_TO_NEAREST_INT));
vy01234567 = _mm256_max_ps(vy01234567, vy_min);
vy456789AB = _mm256_max_ps(vy456789AB, vy_min);
vy01234567 = _mm256_min_ps(vy01234567, vy_max);
vy456789AB = _mm256_min_ps(vy456789AB, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy456789AB, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,203 | 32.726316 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vmulc-minmax-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmulc_minmax_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,288 | 29.118421 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vmulc-minmax-fp16arith-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmulc_minmax_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
do {
float16_t vacc = *a++;
vacc = vmulh_f16(vacc, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,391 | 25.769231 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vmulc-minmax-fp16arith-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmulc_minmax_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
a += 2;
vacc0 = vmulh_f16(vacc0, vb);
vacc1 = vmulh_f16(vacc1, vb);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
o[0] = vacc0;
o[1] = vacc1;
o += 2;
}
if XNN_UNLIKELY(batch != 0) {
float16_t vacc = *a;
vacc = vmulh_f16(vacc, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o = vacc;
}
}
| 1,788 | 24.557143 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vmulc-minmax-fp16arith-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmulc_minmax_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
float16_t vacc2 = a[2];
float16_t vacc3 = a[3];
a += 4;
vacc0 = vmulh_f16(vacc0, vb);
vacc1 = vmulh_f16(vacc1, vb);
vacc2 = vmulh_f16(vacc2, vb);
vacc3 = vmulh_f16(vacc3, vb);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc2 = vmaxnmh_f16(vacc2, vy_min);
vacc3 = vmaxnmh_f16(vacc3, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
vacc2 = vminnmh_f16(vacc2, vy_max);
vacc3 = vminnmh_f16(vacc3, vy_max);
o[0] = vacc0;
o[1] = vacc1;
o[2] = vacc2;
o[3] = vacc3;
o += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float16_t vacc = *a++;
vacc = vmulh_f16(vacc, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 2,191 | 25.409639 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vmulc-minmax-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmulc_minmax_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t va456789AB = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vmulq_f16(va01234567, vb);
float16x8_t vy456789AB = vmulq_f16(va456789AB, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy456789AB = vmaxq_f16(vy456789AB, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vy456789AB = vminq_f16(vy456789AB, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy456789AB)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vmulq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vmulq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,953 | 32.954023 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vmulc-minmax-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vmulc_minmax_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vmulq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vmulq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,282 | 31.614286 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vrdivc-minmax-aarch64-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vrdivc_minmax_ukernel__aarch64_neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t va456789AB = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
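    // RDIV reverses the operands: the broadcast scalar b is the dividend and each element of a is the divisor.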
float16x8_t vy01234567 = vdivq_f16(vb, va01234567);
float16x8_t vy456789AB = vdivq_f16(vb, va456789AB);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy456789AB = vmaxq_f16(vy456789AB, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vy456789AB = vminq_f16(vy456789AB, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy456789AB)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vdivq_f16(vb, va01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vdivq_f16(vb, va01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,962 | 33.057471 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vrdivc-minmax-aarch64-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vrdivc_minmax_ukernel__aarch64_neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vdivq_f16(vb, va01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vdivq_f16(vb, va01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,291 | 31.742857 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vrdivc-minmax-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vrdivc_minmax_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
a += 16;
__m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(vb, va01234567), _MM_FROUND_TO_NEAREST_INT));
__m256 vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(vb, va456789AB), _MM_FROUND_TO_NEAREST_INT));
vy01234567 = _mm256_max_ps(vy01234567, vy_min);
vy456789AB = _mm256_max_ps(vy456789AB, vy_min);
vy01234567 = _mm256_min_ps(vy01234567, vy_max);
vy456789AB = _mm256_min_ps(vy456789AB, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy456789AB, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(vb, va), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(vb, va), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,204 | 32.736842 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vrdivc-minmax-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vrdivc_minmax_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(vb, va), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
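    // This full 8-element load may read past the end of the batch, which is permitted because the kernel is declared XNN_OOB_READS.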
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_div_ps(vb, va), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,289 | 29.131579 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vrdivc-minmax-fp16arith-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vrdivc_minmax_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
do {
float16_t vacc = *a++;
vacc = vdivh_f16(vb, vacc);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,392 | 25.788462 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vrdivc-minmax-fp16arith-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vrdivc_minmax_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
a += 2;
vacc0 = vdivh_f16(vb, vacc0);
vacc1 = vdivh_f16(vb, vacc1);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
o[0] = vacc0;
o[1] = vacc1;
o += 2;
}
if XNN_UNLIKELY(batch != 0) {
float16_t vacc = *a;
vacc = vdivh_f16(vb, vacc);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o = vacc;
}
}
| 1,789 | 24.571429 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vrdivc-minmax-fp16arith-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vrdivc_minmax_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
float16_t vacc2 = a[2];
float16_t vacc3 = a[3];
a += 4;
vacc0 = vdivh_f16(vb, vacc0);
vacc1 = vdivh_f16(vb, vacc1);
vacc2 = vdivh_f16(vb, vacc2);
vacc3 = vdivh_f16(vb, vacc3);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc2 = vmaxnmh_f16(vacc2, vy_min);
vacc3 = vmaxnmh_f16(vacc3, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
vacc2 = vminnmh_f16(vacc2, vy_max);
vacc3 = vminnmh_f16(vacc3, vy_max);
o[0] = vacc0;
o[1] = vacc1;
o[2] = vacc2;
o[3] = vacc3;
o += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float16_t vacc = *a++;
vacc = vdivh_f16(vb, vacc);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 2,192 | 25.421687 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vrsubc-minmax-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vrsubc_minmax_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
a += 16;
__m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(vb, va01234567), _MM_FROUND_TO_NEAREST_INT));
__m256 vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(vb, va456789AB), _MM_FROUND_TO_NEAREST_INT));
vy01234567 = _mm256_max_ps(vy01234567, vy_min);
vy456789AB = _mm256_max_ps(vy456789AB, vy_min);
vy01234567 = _mm256_min_ps(vy01234567, vy_max);
vy456789AB = _mm256_min_ps(vy456789AB, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy456789AB, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(vb, va), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(vb, va), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,204 | 32.736842 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vrsubc-minmax-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vrsubc_minmax_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(vb, va), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(vb, va), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,289 | 29.131579 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vrsubc-minmax-fp16arith-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vrsubc_minmax_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
do {
float16_t vacc = *a++;
vacc = vsubh_f16(vb, vacc);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,392 | 25.788462 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vrsubc-minmax-fp16arith-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vrsubc_minmax_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
a += 2;
vacc0 = vsubh_f16(vb, vacc0);
vacc1 = vsubh_f16(vb, vacc1);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
o[0] = vacc0;
o[1] = vacc1;
o += 2;
}
if XNN_UNLIKELY(batch != 0) {
float16_t vacc = *a;
vacc = vsubh_f16(vb, vacc);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o = vacc;
}
}
| 1,789 | 24.571429 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vrsubc-minmax-fp16arith-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vrsubc_minmax_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
  memcpy(&vy_min, &params->fp16arith.min, sizeof(vy_min));
  memcpy(&vy_max, &params->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
float16_t vacc2 = a[2];
float16_t vacc3 = a[3];
a += 4;
vacc0 = vsubh_f16(vb, vacc0);
vacc1 = vsubh_f16(vb, vacc1);
vacc2 = vsubh_f16(vb, vacc2);
vacc3 = vsubh_f16(vb, vacc3);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc2 = vmaxnmh_f16(vacc2, vy_min);
vacc3 = vmaxnmh_f16(vacc3, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
vacc2 = vminnmh_f16(vacc2, vy_max);
vacc3 = vminnmh_f16(vacc3, vy_max);
o[0] = vacc0;
o[1] = vacc1;
o[2] = vacc2;
o[3] = vacc3;
o += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float16_t vacc = *a++;
vacc = vsubh_f16(vb, vacc);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 2,192 | 25.421687 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vrsubc-minmax-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vrsubc_minmax_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t va456789AB = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
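    // RSUB reverses the operands: each element of a is subtracted from the broadcast scalar b.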
float16x8_t vy01234567 = vsubq_f16(vb, va01234567);
float16x8_t vy456789AB = vsubq_f16(vb, va456789AB);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy456789AB = vmaxq_f16(vy456789AB, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vy456789AB = vminq_f16(vy456789AB, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy456789AB)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vsubq_f16(vb, va01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vsubq_f16(vb, va01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,954 | 32.965517 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vrsubc-minmax-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vrsubc_minmax_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
  const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vsubq_f16(vb, va01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vsubq_f16(vb, va01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,283 | 31.628571 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsqrdiff-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsqrdiff_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
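  // Main loop: 16 half-precision elements per iteration, widened to fp32 with F16C;
  // each intermediate result is rounded back to fp16 via _mm256_cvtps_ph/_mm256_cvtph_ps,
  // keeping intermediate precision at half precision.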
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
const __m256 vb456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (b + 8)));
a += 16;
b += 16;
__m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va01234567, vb01234567), _MM_FROUND_TO_NEAREST_INT));
__m256 vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va456789AB, vb456789AB), _MM_FROUND_TO_NEAREST_INT));
vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy01234567, vy01234567), _MM_FROUND_TO_NEAREST_INT));
vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy456789AB, vy456789AB), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy456789AB, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
a += 8;
b += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy, vy), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
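  // Remainder of 1-7 elements: a full 8-element vector is still computed
  // (out-of-bounds reads are permitted by the XNN_OOB_READS annotation) and only
  // the valid part is stored, in 4-, 2- and 1-element pieces.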
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy, vy), _MM_FROUND_TO_NEAREST_INT));
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,454 | 36.150538 | 123 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsqrdiff-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsqrdiff_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
a += 8;
b += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy, vy), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy, vy), _MM_FROUND_TO_NEAREST_INT));
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,311 | 30.243243 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsqrdiff-fp16arith-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsqrdiff_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
do {
const float16_t va = *a++;
const float16_t vb = *b++;
float16_t vacc = vsubh_f16(va, vb);
vacc = vmulh_f16(vacc, vacc);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,169 | 24.434783 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsqrdiff-fp16arith-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsqrdiff_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
float16_t vacc0 = vsubh_f16(va0, vb0);
float16_t vacc1 = vsubh_f16(va1, vb1);
vacc0 = vmulh_f16(vacc0, vacc0);
vacc1 = vmulh_f16(vacc1, vacc1);
*o++ = vacc0;
*o++ = vacc1;
}
if XNN_UNLIKELY(batch != 0) {
const float16_t va = *a;
const float16_t vb = *b;
float16_t vacc = vsubh_f16(va, vb);
vacc = vmulh_f16(vacc, vacc);
*o = vacc;
}
}
| 1,544 | 23.919355 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsqrdiff-fp16arith-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsqrdiff_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
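  // Scalar FP16 loop, unrolled by 4: each element computes the difference and
  // squares it with native half-precision arithmetic.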
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t va2 = *a++;
const float16_t va3 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
const float16_t vb2 = *b++;
const float16_t vb3 = *b++;
float16_t vacc0 = vsubh_f16(va0, vb0);
float16_t vacc1 = vsubh_f16(va1, vb1);
float16_t vacc2 = vsubh_f16(va2, vb2);
float16_t vacc3 = vsubh_f16(va3, vb3);
vacc0 = vmulh_f16(vacc0, vacc0);
vacc1 = vmulh_f16(vacc1, vacc1);
vacc2 = vmulh_f16(vacc2, vacc2);
vacc3 = vmulh_f16(vacc3, vacc3);
*o++ = vacc0;
*o++ = vacc1;
*o++ = vacc2;
*o++ = vacc3;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float16_t va = *a++;
const float16_t vb = *b++;
float16_t vacc = vsubh_f16(va, vb);
vacc = vmulh_f16(vacc, vacc);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 1,953 | 25.053333 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsqrdiff-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsqrdiff_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
const float16x8_t va456789AB = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb456789AB = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
float16x8_t vy456789AB = vsubq_f16(va456789AB, vb456789AB);
vy01234567 = vmulq_f16(vy01234567, vy01234567);
vy456789AB = vmulq_f16(vy456789AB, vy456789AB);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy456789AB)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
vy01234567 = vmulq_f16(vy01234567, vy01234567);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
vy01234567 = vmulq_f16(vy01234567, vy01234567);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,868 | 33.566265 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsqrdiff-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsqrdiff_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
vy01234567 = vmulq_f16(vy01234567, vy01234567);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
vy01234567 = vmulq_f16(vy01234567, vy01234567);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
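// Illustrative usage sketch (not part of the generated kernel; the helper name and
// the input values are assumptions for demonstration). Half-precision operands are
// passed as raw uint16_t bit patterns (0x3C00 = 1.0h, 0x4000 = 2.0h, 0x3800 = 0.5h),
// and the default-params object is not read by this kernel, so it is only
// zero-initialized to satisfy the non-NULL contract.
void example_usage_vsqrdiff_x8(void) {
  const uint16_t a[8] = {0x3C00, 0x3C00, 0x4000, 0x4000, 0x4200, 0x4200, 0x4400, 0x4400};
  const uint16_t b[8] = {0x4000, 0x3800, 0x3C00, 0x4400, 0x4000, 0x4600, 0x3C00, 0x4200};
  uint16_t y[8];
  union xnn_f16_default_params params = {0};  // contents are never read by this ukernel
  xnn_f16_vsqrdiff_ukernel__neonfp16arith_x8(8 * sizeof(uint16_t), a, b, y, &params);
  // y[0] now holds 0x3C00 (1.0h): (1.0 - 2.0) squared.
}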
| 2,110 | 30.507463 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsqrdiffc-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsqrdiffc_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
a += 16;
__m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va01234567, vb), _MM_FROUND_TO_NEAREST_INT));
__m256 vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va456789AB, vb), _MM_FROUND_TO_NEAREST_INT));
vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy01234567, vy01234567), _MM_FROUND_TO_NEAREST_INT));
vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy456789AB, vy456789AB), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy456789AB, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy, vy), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy, vy), _MM_FROUND_TO_NEAREST_INT));
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,154 | 34.852273 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsqrdiffc-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsqrdiffc_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy, vy), _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vy, vy), _MM_FROUND_TO_NEAREST_INT));
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,214 | 29.763889 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsqrdiffc-fp16arith-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsqrdiffc_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
const float16_t vb = *b;
do {
float16_t vacc = *a++;
vacc = vsubh_f16(vacc, vb);
vacc = vmulh_f16(vacc, vacc);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,181 | 24.148936 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsqrdiffc-fp16arith-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsqrdiffc_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
const float16_t vb = *b;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
a += 2;
vacc0 = vsubh_f16(vacc0, vb);
vacc1 = vsubh_f16(vacc1, vb);
vacc0 = vmulh_f16(vacc0, vacc0);
vacc1 = vmulh_f16(vacc1, vacc1);
o[0] = vacc0;
o[1] = vacc1;
o += 2;
}
if XNN_UNLIKELY(batch != 0) {
float16_t vacc = *a;
vacc = vsubh_f16(vacc, vb);
vacc = vmulh_f16(vacc, vacc);
*o = vacc;
}
}
| 1,491 | 23.064516 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsqrdiffc-fp16arith-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsqrdiffc_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
const float16_t vb = *b;
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
float16_t vacc2 = a[2];
float16_t vacc3 = a[3];
a += 4;
vacc0 = vsubh_f16(vacc0, vb);
vacc1 = vsubh_f16(vacc1, vb);
vacc2 = vsubh_f16(vacc2, vb);
vacc3 = vsubh_f16(vacc3, vb);
vacc0 = vmulh_f16(vacc0, vacc0);
vacc1 = vmulh_f16(vacc1, vacc1);
vacc2 = vmulh_f16(vacc2, vacc2);
vacc3 = vmulh_f16(vacc3, vacc3);
o[0] = vacc0;
o[1] = vacc1;
o[2] = vacc2;
o[3] = vacc3;
o += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float16_t vacc = *a++;
vacc = vsubh_f16(vacc, vb);
vacc = vmulh_f16(vacc, vacc);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 1,806 | 23.753425 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsqrdiffc-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
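  // The scalar operand b is broadcast to every lane once; each iteration computes
  // the squared difference for 16 elements in two 8-wide halves.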
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t va456789AB = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
float16x8_t vy456789AB = vsubq_f16(va456789AB, vb);
vy01234567 = vmulq_f16(vy01234567, vy01234567);
vy456789AB = vmulq_f16(vy456789AB, vy456789AB);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy456789AB)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
vy01234567 = vmulq_f16(vy01234567, vy01234567);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
vy01234567 = vmulq_f16(vy01234567, vy01234567);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,592 | 31.4125 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsqrdiffc-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsqrdiffc_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
vy01234567 = vmulq_f16(vy01234567, vy01234567);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
vy01234567 = vmulq_f16(vy01234567, vy01234567);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,010 | 29.469697 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsub-minmax-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsub_minmax_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
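  // Main loop: 16 elements per iteration; each difference is rounded to fp16
  // precision, clamped to [min, max] in fp32, then stored as fp16.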
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
const __m256 vb456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (b + 8)));
a += 16;
b += 16;
__m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va01234567, vb01234567), _MM_FROUND_TO_NEAREST_INT));
__m256 vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va456789AB, vb456789AB), _MM_FROUND_TO_NEAREST_INT));
vy01234567 = _mm256_max_ps(vy01234567, vy_min);
vy456789AB = _mm256_max_ps(vy456789AB, vy_min);
vy01234567 = _mm256_min_ps(vy01234567, vy_max);
vy456789AB = _mm256_min_ps(vy456789AB, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy456789AB, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
a += 8;
b += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,503 | 34.04 | 123 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsub-minmax-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsub_minmax_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
a += 8;
b += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 vb = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,385 | 29.589744 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsub-minmax-fp16arith-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsub_minmax_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
memcpy(&vy_min, ¶ms->fp16arith.min, sizeof(vy_min));
memcpy(&vy_max, ¶ms->fp16arith.max, sizeof(vy_max));
do {
const float16_t va = *a++;
const float16_t vb = *b++;
float16_t vacc = vsubh_f16(va, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,379 | 26.058824 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsub-minmax-fp16arith-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsub_minmax_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
memcpy(&vy_min, ¶ms->fp16arith.min, sizeof(vy_min));
memcpy(&vy_max, ¶ms->fp16arith.max, sizeof(vy_max));
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
float16_t vacc0 = vsubh_f16(va0, vb0);
float16_t vacc1 = vsubh_f16(va1, vb1);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
*o++ = vacc0;
*o++ = vacc1;
}
if XNN_UNLIKELY(batch != 0) {
const float16_t va = *a;
const float16_t vb = *b;
float16_t vacc = vsubh_f16(va, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o = vacc;
}
}
| 1,841 | 25.314286 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsub-minmax-fp16arith-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsub_minmax_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
memcpy(&vy_min, ¶ms->fp16arith.min, sizeof(vy_min));
memcpy(&vy_max, ¶ms->fp16arith.max, sizeof(vy_max));
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t va2 = *a++;
const float16_t va3 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
const float16_t vb2 = *b++;
const float16_t vb3 = *b++;
float16_t vacc0 = vsubh_f16(va0, vb0);
float16_t vacc1 = vsubh_f16(va1, vb1);
float16_t vacc2 = vsubh_f16(va2, vb2);
float16_t vacc3 = vsubh_f16(va3, vb3);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc2 = vmaxnmh_f16(vacc2, vy_min);
vacc3 = vmaxnmh_f16(vacc3, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
vacc2 = vminnmh_f16(vacc2, vy_max);
vacc3 = vminnmh_f16(vacc3, vy_max);
*o++ = vacc0;
*o++ = vacc1;
*o++ = vacc2;
*o++ = vacc3;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float16_t va = *a++;
const float16_t vb = *b++;
float16_t vacc = vsubh_f16(va, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 2,338 | 26.517647 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsub-minmax-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsub_minmax_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.min));
const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.max));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
const float16x8_t va456789AB = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb456789AB = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
float16x8_t vy456789AB = vsubq_f16(va456789AB, vb456789AB);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy456789AB = vmaxq_f16(vy456789AB, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vy456789AB = vminq_f16(vy456789AB, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy456789AB)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 3,229 | 34.888889 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsub-minmax-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vop-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsub_minmax_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.min));
const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.max));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vsubq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,382 | 32.56338 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsubc-minmax-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsubc_minmax_ukernel__f16c_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 va01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
const __m256 va456789AB = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (a + 8)));
a += 16;
__m256 vy01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va01234567, vb), _MM_FROUND_TO_NEAREST_INT));
__m256 vy456789AB = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va456789AB, vb), _MM_FROUND_TO_NEAREST_INT));
vy01234567 = _mm256_max_ps(vy01234567, vy_min);
vy456789AB = _mm256_max_ps(vy456789AB, vy_min);
vy01234567 = _mm256_min_ps(vy01234567, vy_max);
vy456789AB = _mm256_min_ps(vy456789AB, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy456789AB, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,203 | 32.726316 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsubc-minmax-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsubc_minmax_ukernel__f16c_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
const __m256 vb = _mm256_cvtph_ps(_mm_set1_epi16((short) *b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
a += 8;
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 va = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) a));
__m256 vy = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(va, vb), _MM_FROUND_TO_NEAREST_INT));
vy = _mm256_max_ps(vy, vy_min);
vy = _mm256_min_ps(vy, vy_max);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 2,288 | 29.118421 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsubc-minmax-fp16arith-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsubc_minmax_ukernel__fp16arith_x1(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
memcpy(&vy_min, ¶ms->fp16arith.min, sizeof(vy_min));
memcpy(&vy_max, ¶ms->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
do {
float16_t vacc = *a++;
vacc = vsubh_f16(vacc, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
| 1,391 | 25.769231 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsubc-minmax-fp16arith-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsubc_minmax_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
memcpy(&vy_min, ¶ms->fp16arith.min, sizeof(vy_min));
memcpy(&vy_max, ¶ms->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
a += 2;
vacc0 = vsubh_f16(vacc0, vb);
vacc1 = vsubh_f16(vacc1, vb);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
o[0] = vacc0;
o[1] = vacc1;
o += 2;
}
if XNN_UNLIKELY(batch != 0) {
float16_t vacc = *a;
vacc = vsubh_f16(vacc, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o = vacc;
}
}
| 1,788 | 24.557143 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsubc-minmax-fp16arith-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-fp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsubc_minmax_ukernel__fp16arith_x4(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
memcpy(&vy_min, ¶ms->fp16arith.min, sizeof(vy_min));
memcpy(&vy_max, ¶ms->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
for (; batch >= 4 * sizeof(float16_t); batch -= 4 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
float16_t vacc2 = a[2];
float16_t vacc3 = a[3];
a += 4;
vacc0 = vsubh_f16(vacc0, vb);
vacc1 = vsubh_f16(vacc1, vb);
vacc2 = vsubh_f16(vacc2, vb);
vacc3 = vsubh_f16(vacc3, vb);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc2 = vmaxnmh_f16(vacc2, vy_min);
vacc3 = vmaxnmh_f16(vacc3, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
vacc2 = vminnmh_f16(vacc2, vy_max);
vacc3 = vminnmh_f16(vacc3, vy_max);
o[0] = vacc0;
o[1] = vacc1;
o[2] = vacc2;
o[3] = vacc3;
o += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float16_t vacc = *a++;
vacc = vsubh_f16(vacc, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o++ = vacc;
batch -= sizeof(float16_t);
} while (batch != 0);
}
}
| 2,191 | 25.409639 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsubc-minmax-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsubc_minmax_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.min));
const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.max));
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t va456789AB = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
float16x8_t vy456789AB = vsubq_f16(va456789AB, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy456789AB = vmaxq_f16(vy456789AB, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vy456789AB = vminq_f16(vy456789AB, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy456789AB)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
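    // Remainder of 1-7 halves: compute a full 8-lane vector (XNN_OOB_READS permits reading past the
    // end of the inputs) and store 4, 2, then 1 lanes as needed.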
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,953 | 32.954023 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vbinary/gen/f16-vsubc-minmax-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vbinary/vopc-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vsubc_minmax_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.min));
const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.max));
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vsubq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
| 2,282 | 31.614286 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vclamp/gen/f16-vclamp-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vclamp/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vclamp_ukernel__f16c_x16(
size_t batch,
const void* restrict input,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
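  // F16C path: convert packed halves to fp32 with _mm256_cvtph_ps, clamp in fp32, and convert back
  // with _mm256_cvtps_ph using round-to-nearest.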
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
__m256 vacc01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
__m256 vacc89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
i += 16;
vacc01234567 = _mm256_max_ps(vacc01234567, vy_min);
vacc89ABCDEF = _mm256_max_ps(vacc89ABCDEF, vy_min);
vacc01234567 = _mm256_min_ps(vacc01234567, vy_max);
vacc89ABCDEF = _mm256_min_ps(vacc89ABCDEF, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vacc89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc = _mm256_max_ps(vacc, vy_min);
vacc = _mm256_min_ps(vacc, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
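    // Remainder of 1-7 halves: compute a full vector, then store 64, 32, and finally 16 bits of the
    // packed fp16 result.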
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
vacc = _mm256_max_ps(vacc, vy_min);
vacc = _mm256_min_ps(vacc, vy_max);
__m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = _mm_extract_epi16(vh, 0);
}
}
}
| 2,583 | 31.3 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vclamp/gen/f16-vclamp-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vclamp/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vclamp_ukernel__f16c_x8(
size_t batch,
const void* restrict input,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
const __m256 vy_min = _mm256_load_ps(params->avx.min);
const __m256 vy_max = _mm256_load_ps(params->avx.max);
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc = _mm256_max_ps(vacc, vy_min);
vacc = _mm256_min_ps(vacc, vy_max);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
__m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
vacc = _mm256_max_ps(vacc, vy_min);
vacc = _mm256_min_ps(vacc, vy_max);
__m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = _mm_extract_epi16(vh, 0);
}
}
}
| 1,890 | 28.092308 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vclamp/gen/f16-vclamp-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vclamp/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vclamp_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.min));
const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.max));
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vacc89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc01234567 = vmaxq_f16(vacc01234567, vy_min);
vacc89ABCDEF = vmaxq_f16(vacc89ABCDEF, vy_min);
vacc01234567 = vminq_f16(vacc01234567, vy_max);
vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vacc01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vacc89ABCDEF)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vmaxq_f16(vacc, vy_min);
vacc = vminq_f16(vacc, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
vacc = vmaxq_f16(vacc, vy_min);
vacc = vminq_f16(vacc, vy_max);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 2,458 | 32.684932 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vclamp/gen/f16-vclamp-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vclamp/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vclamp_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.min));
const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.max));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vmaxq_f16(vacc, vy_min);
vacc = vminq_f16(vacc, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
vacc = vmaxq_f16(vacc, vy_min);
vacc = vminq_f16(vacc, vy_max);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 1,889 | 30.5 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-velu/gen/f16-velu-avx2-rr1-p3-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-velu/avx2-rr1-p3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_velu_ukernel__avx2_rr1_p3_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_p3.prescale);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_p3.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p3.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p3.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p3.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p3.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p3.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p3.c1);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_p3.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_p3.beta);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
__m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
__m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
i += 16;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
__m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
__m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vt0 = _mm256_mul_ps(vt0, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vt1 = _mm256_mul_ps(vt1, valpha);
vt0 = _mm256_mul_ps(vt0, vs0);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vt1 = _mm256_mul_ps(vt1, vs1);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
const __m256 ve0 = _mm256_fmadd_ps(vp0, vt0, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, vt1, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
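    // Magic-bias trick: the low bits of vn now hold round(z * log2e); shifting them into the fp32
    // exponent field reconstructs vs = 2**n.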
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, valpha);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
const __m256 ve = _mm256_fmadd_ps(vp, vt, vs);
vx = _mm256_mul_ps(vx, vbeta);
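    // blendv keys off the sign bit of its mask operand: lanes where vx (now beta * x) has the sign
    // bit set take the ELU result ve, the remaining lanes keep beta * x.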
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
__m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, valpha);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
const __m256 ve = _mm256_fmadd_ps(vp, vt, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 5,496 | 36.394558 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-velu/gen/f16-velu-avx2-rr1-p3-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-velu/avx2-rr1-p3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_velu_ukernel__avx2_rr1_p3_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_p3.prescale);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_p3.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p3.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p3.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p3.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p3.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p3.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p3.c1);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_p3.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_p3.beta);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, valpha);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
const __m256 ve = _mm256_fmadd_ps(vp, vt, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
__m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, valpha);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
const __m256 ve = _mm256_fmadd_ps(vp, vt, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vh, 0);
}
}
}
| 3,680 | 34.737864 | 84 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-velu/gen/f16-velu-neonfp16arith-rr1-p3-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-velu/neonfp16arith-rr1-p3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_velu_ukernel__neonfp16arith_rr1_p3_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vprescale = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.prescale));
const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.sat_cutoff));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.magic_bias));
const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.log2e));
const float16x8_t vminus_ln2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.minus_ln2));
const float16x8_t vc3 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.c3));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.c2));
const float16x8_t vminus_alpha = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.minus_alpha));
const float16x8_t vbeta = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.beta));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz0 = vmulq_f16(vx0, vprescale);
float16x8_t vz1 = vmulq_f16(vx1, vprescale);
vz0 = vmaxq_f16(vz0, vsat_cutoff);
vz1 = vmaxq_f16(vz1, vsat_cutoff);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vlog2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vlog2e);
float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
vn1 = vsubq_f16(vn1, vmagic_bias);
float16x8_t vt0 = vfmaq_f16(vz0, vn0, vminus_ln2);
float16x8_t vt1 = vfmaq_f16(vz1, vn1, vminus_ln2);
float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
vp0 = vmulq_f16(vp0, vt0);
float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
vp1 = vmulq_f16(vp1, vt1);
vt0 = vmulq_f16(vt0, vs0);
vs0 = vfmsq_f16(vminus_alpha, vs0, vminus_alpha);
vt1 = vmulq_f16(vt1, vs1);
vs1 = vfmsq_f16(vminus_alpha, vs1, vminus_alpha);
vp0 = vfmaq_f16(vt0, vp0, vt0);
vp1 = vfmaq_f16(vt1, vp1, vt1);
float16x8_t ve0 = vfmsq_f16(vs0, vp0, vminus_alpha);
const uint16x8_t vm0 = vcltq_s16(vreinterpretq_s16_f16(vx0), vmovq_n_s16(0));
float16x8_t ve1 = vfmsq_f16(vs1, vp1, vminus_alpha);
const uint16x8_t vm1 = vcltq_s16(vreinterpretq_s16_f16(vx1), vmovq_n_s16(0));
vx0 = vmulq_f16(vx0, vbeta);
vx1 = vmulq_f16(vx1, vbeta);
const float16x8_t vy0 = vbslq_f16(vm0, ve0, vx0);
const float16x8_t vy1 = vbslq_f16(vm1, ve1, vx1);
vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vmulq_f16(vx, vprescale);
vz = vmaxq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vlog2e);
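    // fp16 variant of the magic-bias trick: the mantissa is 10 bits wide, so the integer part of vn
    // is shifted by 10 into the exponent field to form vs = 2**n.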
float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vminus_ln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vmulq_f16(vp, vt);
vt = vmulq_f16(vt, vs);
vs = vfmsq_f16(vminus_alpha, vs, vminus_alpha);
vp = vfmaq_f16(vt, vp, vt);
float16x8_t ve = vfmsq_f16(vs, vp, vminus_alpha);
const uint16x8_t vm = vcltq_s16(vreinterpretq_s16_f16(vx), vmovq_n_s16(0));
vx = vmulq_f16(vx, vbeta);
const float16x8_t vy = vbslq_f16(vm, ve, vx);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vmulq_f16(vx, vprescale);
vz = vmaxq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vlog2e);
float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vminus_ln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vmulq_f16(vp, vt);
vt = vmulq_f16(vt, vs);
vs = vfmsq_f16(vminus_alpha, vs, vminus_alpha);
vp = vfmaq_f16(vt, vp, vt);
float16x8_t ve = vfmsq_f16(vs, vp, vminus_alpha);
const uint16x8_t vm = vcltq_s16(vreinterpretq_s16_f16(vx), vmovq_n_s16(0));
vx = vmulq_f16(vx, vbeta);
float16x8_t vy = vbslq_f16(vm, ve, vx);
float16x4_t vy_lo = vget_low_f16(vy);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 5,750 | 38.662069 | 111 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-velu/gen/f16-velu-neonfp16arith-rr1-p3-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-velu/neonfp16arith-rr1-p3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_velu_ukernel__neonfp16arith_rr1_p3_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vprescale = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.prescale));
const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.sat_cutoff));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.magic_bias));
const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.log2e));
const float16x8_t vminus_ln2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.minus_ln2));
const float16x8_t vc3 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.c3));
const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.c2));
const float16x8_t vminus_alpha = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.minus_alpha));
const float16x8_t vbeta = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith_rr1_p3.beta));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vmulq_f16(vx, vprescale);
vz = vmaxq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vlog2e);
float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vminus_ln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vmulq_f16(vp, vt);
vt = vmulq_f16(vt, vs);
vs = vfmsq_f16(vminus_alpha, vs, vminus_alpha);
vp = vfmaq_f16(vt, vp, vt);
float16x8_t ve = vfmsq_f16(vs, vp, vminus_alpha);
const uint16x8_t vm = vcltq_s16(vreinterpretq_s16_f16(vx), vmovq_n_s16(0));
vx = vmulq_f16(vx, vbeta);
const float16x8_t vy = vbslq_f16(vm, ve, vx);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vmulq_f16(vx, vprescale);
vz = vmaxq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vlog2e);
float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
float16x8_t vt = vfmaq_f16(vz, vn, vminus_ln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vmulq_f16(vp, vt);
vt = vmulq_f16(vt, vs);
vs = vfmsq_f16(vminus_alpha, vs, vminus_alpha);
vp = vfmaq_f16(vt, vp, vt);
float16x8_t ve = vfmsq_f16(vs, vp, vminus_alpha);
const uint16x8_t vm = vcltq_s16(vreinterpretq_s16_f16(vx), vmovq_n_s16(0));
vx = vmulq_f16(vx, vbeta);
float16x8_t vy = vbslq_f16(vm, ve, vx);
float16x4_t vy_lo = vget_low_f16(vy);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 3,902 | 39.237113 | 111 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vhswish/gen/f16-vhswish-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vhswish/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vhswish_ukernel__f16c_x16(
size_t batch,
const void* restrict input,
void* restrict output,
const union xnn_f16_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
const __m256 vsixth = _mm256_load_ps(params->avx.sixth);
const __m256 vthree = _mm256_load_ps(params->avx.three);
const __m128i vsix = _mm_load_si128((const __m128i*) params->avx.six);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
__m256 vx01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
__m256 vx89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
i += 16;
__m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vx01234567, vthree), _MM_FROUND_TO_NEAREST_INT);
vx01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx01234567, vsixth), _MM_FROUND_TO_NEAREST_INT));
__m128i vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(vx89ABCDEF, vthree), _MM_FROUND_TO_NEAREST_INT);
vx89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx89ABCDEF, vsixth), _MM_FROUND_TO_NEAREST_INT));
vacc01234567 = _mm_max_epi16(vacc01234567, vzero);
vacc89ABCDEF = _mm_max_epi16(vacc89ABCDEF, vzero);
vacc01234567 = _mm_min_epi16(vacc01234567, vsix);
vacc89ABCDEF = _mm_min_epi16(vacc89ABCDEF, vsix);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vx01234567), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc89ABCDEF), vx89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
_mm_storeu_si128((__m128i*) o, vacc01234567);
_mm_storeu_si128((__m128i*) (o + 8), vacc89ABCDEF);
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
__m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_TO_NEAREST_INT);
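    // x * (1/6) is rounded back to half precision (cvtps_ph/cvtph_ps round trip) so the intermediate
    // matches fp16 precision; the [0, 6] clamp below uses integer min/max on the packed fp16 bits.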
vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_TO_NEAREST_INT));
vacc = _mm_max_epi16(vacc, vzero);
vacc = _mm_min_epi16(vacc, vsix);
vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_TO_NEAREST_INT);
_mm_storeu_si128((__m128i*) o, vacc);
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
__m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
__m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_TO_NEAREST_INT);
vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_TO_NEAREST_INT));
vacc = _mm_max_epi16(vacc, vzero);
vacc = _mm_min_epi16(vacc, vsix);
vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vacc);
vacc = _mm_unpackhi_epi64(vacc, vacc);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vacc);
vacc = _mm_srli_epi64(vacc, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vacc, 0);
}
}
}
| 3,765 | 38.642105 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vhswish/gen/f16-vhswish-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vhswish/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vhswish_ukernel__f16c_x8(
size_t batch,
const void* restrict input,
void* restrict output,
const union xnn_f16_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
const __m256 vsixth = _mm256_load_ps(params->avx.sixth);
const __m256 vthree = _mm256_load_ps(params->avx.three);
const __m128i vsix = _mm_load_si128((const __m128i*) params->avx.six);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
__m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
__m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_TO_NEAREST_INT);
vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_TO_NEAREST_INT));
vacc = _mm_max_epi16(vacc, vzero);
vacc = _mm_min_epi16(vacc, vsix);
vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_TO_NEAREST_INT);
_mm_storeu_si128((__m128i*) o, vacc);
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
__m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
__m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_TO_NEAREST_INT);
vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_TO_NEAREST_INT));
vacc = _mm_max_epi16(vacc, vzero);
vacc = _mm_min_epi16(vacc, vsix);
vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vacc);
vacc = _mm_unpackhi_epi64(vacc, vacc);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vacc);
vacc = _mm_srli_epi64(vacc, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vacc, 0);
}
}
}
| 2,488 | 33.569444 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vhswish/gen/f16-vhswish-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vhswish/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vhswish_ukernel__neonfp16arith_x16(
size_t batch,
const void* restrict input,
void* restrict output,
const union xnn_f16_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
const float16x8_t vsixth = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.sixth));
const float16x8_t vthree = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.three));
const int16x8_t vsix = vreinterpretq_s16_u16(vld1q_dup_u16(¶ms->fp16arith.six));
const int16x8_t vzero = vdupq_n_s16(0);
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
float16x8_t vx01234567 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vx89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vacc01234567 = vaddq_f16(vx01234567, vthree);
vx01234567 = vmulq_f16(vx01234567, vsixth);
float16x8_t vacc89ABCDEF = vaddq_f16(vx89ABCDEF, vthree);
vx89ABCDEF = vmulq_f16(vx89ABCDEF, vsixth);
vacc01234567 = vreinterpretq_f16_s16(vmaxq_s16(vreinterpretq_s16_f16(vacc01234567), vzero));
vacc89ABCDEF = vreinterpretq_f16_s16(vmaxq_s16(vreinterpretq_s16_f16(vacc89ABCDEF), vzero));
vacc01234567 = vreinterpretq_f16_s16(vminq_s16(vreinterpretq_s16_f16(vacc01234567), vsix));
vacc89ABCDEF = vreinterpretq_f16_s16(vminq_s16(vreinterpretq_s16_f16(vacc89ABCDEF), vsix));
vacc01234567 = vmulq_f16(vacc01234567, vx01234567);
vacc89ABCDEF = vmulq_f16(vacc89ABCDEF, vx89ABCDEF);
vst1q_u16(o, vreinterpretq_u16_f16(vacc01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vacc89ABCDEF)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vacc = vaddq_f16(vx, vthree);
vx = vmulq_f16(vx, vsixth);
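    // Clamp (x + 3) to [0, 6] with integer min/max on the raw fp16 bits: non-negative halves order
    // the same as their int16 encodings, and any negative sum (sign bit set) saturates to +0.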
vacc = vreinterpretq_f16_s16(vmaxq_s16(vreinterpretq_s16_f16(vacc), vzero));
vacc = vreinterpretq_f16_s16(vminq_s16(vreinterpretq_s16_f16(vacc), vsix));
vacc = vmulq_f16(vacc, vx);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
float16x8_t vacc = vaddq_f16(vx, vthree);
vx = vmulq_f16(vx, vsixth);
vacc = vreinterpretq_f16_s16(vmaxq_s16(vreinterpretq_s16_f16(vacc), vzero));
vacc = vreinterpretq_f16_s16(vminq_s16(vreinterpretq_s16_f16(vacc), vsix));
vacc = vmulq_f16(vacc, vx);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 3,495 | 38.280899 | 96 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vhswish/gen/f16-vhswish-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vhswish/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vhswish_ukernel__neonfp16arith_x8(
size_t batch,
const void* restrict input,
void* restrict output,
const union xnn_f16_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
const float16x8_t vsixth = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.sixth));
const float16x8_t vthree = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.three));
const int16x8_t vsix = vreinterpretq_s16_u16(vld1q_dup_u16(¶ms->fp16arith.six));
const int16x8_t vzero = vdupq_n_s16(0);
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vacc = vaddq_f16(vx, vthree);
vx = vmulq_f16(vx, vsixth);
vacc = vreinterpretq_f16_s16(vmaxq_s16(vreinterpretq_s16_f16(vacc), vzero));
vacc = vreinterpretq_f16_s16(vminq_s16(vreinterpretq_s16_f16(vacc), vsix));
vacc = vmulq_f16(vacc, vx);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
float16x8_t vacc = vaddq_f16(vx, vthree);
vx = vmulq_f16(vx, vsixth);
vacc = vreinterpretq_f16_s16(vmaxq_s16(vreinterpretq_s16_f16(vacc), vzero));
vacc = vreinterpretq_f16_s16(vminq_s16(vreinterpretq_s16_f16(vacc), vsix));
vacc = vmulq_f16(vacc, vx);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 2,418 | 34.573529 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vlrelu/gen/f16-vlrelu-f16c-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vlrelu/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vlrelu_ukernel__f16c_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vslope = _mm256_load_ps(params->avx.slope);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 vx01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vx89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
i += 16;
__m256 vacc01234567 = _mm256_mul_ps(vx01234567, vslope);
__m256 vacc89ABCDEF = _mm256_mul_ps(vx89ABCDEF, vslope);
vacc01234567 = _mm256_blendv_ps(vx01234567, vacc01234567, vx01234567);
vacc89ABCDEF = _mm256_blendv_ps(vx89ABCDEF, vacc89ABCDEF, vx89ABCDEF);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vacc89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
__m256 vacc = _mm256_mul_ps(vx, vslope);
vacc = _mm256_blendv_ps(vx, vacc, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
__m256 vacc = _mm256_mul_ps(vx, vslope);
vacc = _mm256_blendv_ps(vx, vacc, vx);
__m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = _mm_extract_epi16(vh, 0);
}
}
}
| 2,590 | 31.3875 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vlrelu/gen/f16-vlrelu-f16c-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vlrelu/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f16_vlrelu_ukernel__f16c_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vslope = _mm256_load_ps(params->avx.slope);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
__m256 vacc = _mm256_mul_ps(vx, vslope);
vacc = _mm256_blendv_ps(vx, vacc, vx);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
__m256 vacc = _mm256_mul_ps(vx, vslope);
vacc = _mm256_blendv_ps(vx, vacc, vx);
__m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
if (batch & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vh);
vh = _mm_unpackhi_epi64(vh, vh);
o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vh);
vh = _mm_srli_epi64(vh, 32);
o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
*o = _mm_extract_epi16(vh, 0);
}
}
}
| 1,841 | 27.338462 | 88 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vlrelu/gen/f16-vlrelu-neonfp16arith-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vlrelu/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vlrelu_ukernel__neonfp16arith_x16(
size_t batch,
const void* input,
void* output,
const union xnn_f16_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vslope = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.slope));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t vx01234567 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vacc01234567 = vmulq_f16(vx01234567, vslope);
const uint16x8_t vmask01234567 = vcltq_s16(vreinterpretq_s16_f16(vx01234567), vmovq_n_s16(0));
float16x8_t vacc89ABCDEF = vmulq_f16(vx89ABCDEF, vslope);
const uint16x8_t vmask89ABCDEF = vcltq_s16(vreinterpretq_s16_f16(vx89ABCDEF), vmovq_n_s16(0));
vacc01234567 = vbslq_f16(vmask01234567, vacc01234567, vx01234567);
vacc89ABCDEF = vbslq_f16(vmask89ABCDEF, vacc89ABCDEF, vx89ABCDEF);
vst1q_u16(o, vreinterpretq_u16_f16(vacc01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vacc89ABCDEF)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vacc = vmulq_f16(vx, vslope);
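    // A signed compare on the raw fp16 bits flags lanes with the sign bit set; those lanes take
    // slope * x, the rest keep x.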
const uint16x8_t vmask = vcltq_s16(vreinterpretq_s16_f16(vx), vmovq_n_s16(0));
vacc = vbslq_f16(vmask, vacc, vx);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
float16x8_t vacc = vmulq_f16(vx, vslope);
const uint16x8_t vmask = vcltq_s16(vreinterpretq_s16_f16(vx), vmovq_n_s16(0));
vacc = vbslq_f16(vmask, vacc, vx);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 2,812 | 37.013514 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vlrelu/gen/f16-vlrelu-neonfp16arith-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vlrelu/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f16_vlrelu_ukernel__neonfp16arith_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vslope = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.slope));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx01234567 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vacc01234567 = vmulq_f16(vx01234567, vslope);
const uint16x8_t vmask01234567 = vcltq_s16(vreinterpretq_s16_f16(vx01234567), vmovq_n_s16(0));
vacc01234567 = vbslq_f16(vmask01234567, vacc01234567, vx01234567);
vst1q_u16(o, vreinterpretq_u16_f16(vacc01234567)); o += 8;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vacc = vmulq_f16(vx, vslope);
const uint16x8_t vmask = vcltq_s16(vreinterpretq_s16_f16(vx), vmovq_n_s16(0));
vacc = vbslq_f16(vmask, vacc, vx);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i));
float16x8_t vacc = vmulq_f16(vx, vslope);
const uint16x8_t vmask = vcltq_s16(vreinterpretq_s16_f16(vx), vmovq_n_s16(0));
vacc = vbslq_f16(vmask, vacc, vx);
float16x4_t vacc_lo = vget_low_f16(vacc);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vget_high_f16(vacc);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
| 2,434 | 34.289855 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-vmulcaddc/gen/f16-vmulcaddc-c16-minmax-fma3-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-vmulcaddc/fma3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
void xnn_f16_vmulcaddc_minmax_ukernel_c16__fma3_2x(
size_t rows,
size_t channels,
const void* restrict input,
size_t input_stride,
const void* restrict weights,
void* restrict output,
size_t output_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(uint16_t) == 0);
const uint16_t* i0 = (const uint16_t*) input;
uint16_t* o0 = (uint16_t*) output;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const uint16_t* w = (const uint16_t*) weights;
size_t c = channels;
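    // Weights are packed in 16-channel groups: 16 fp16 scales followed by 16 fp16 biases, which is
    // why the bias loads sit at an offset of w + 16.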
for (; c >= 16 * sizeof(uint16_t); c -= 16 * sizeof(uint16_t)) {
const __m256 vscale01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
const __m256 vscale89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 8)));
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
__m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 8)));
i0 += 16;
__m256 vacc1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
__m256 vacc1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 8)));
i1 += 16;
const __m256 vbias01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 16)));
const __m256 vbias89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 24)));
w += 32;
vacc0x01234567 = _mm256_fmadd_ps(vacc0x01234567, vscale01234567, vbias01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(vacc0x89ABCDEF, vscale89ABCDEF, vbias89ABCDEF);
vacc1x01234567 = _mm256_fmadd_ps(vacc1x01234567, vscale01234567, vbias01234567);
vacc1x89ABCDEF = _mm256_fmadd_ps(vacc1x89ABCDEF, vscale89ABCDEF, vbias89ABCDEF);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
_mm_storeu_si128((__m128i*) o0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
o0 += 16;
_mm_storeu_si128((__m128i*) o1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o1 + 8), _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
o1 += 16;
}
for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
const __m256 vscale = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
__m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
__m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vbias = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 16)));
w += 8;
vacc0 = _mm256_fmadd_ps(vacc0, vscale, vbias);
vacc1 = _mm256_fmadd_ps(vacc1, vscale, vbias);
vacc0 = _mm256_max_ps(vacc0, vmin);
vacc1 = _mm256_max_ps(vacc1, vmin);
vacc0 = _mm256_min_ps(vacc0, vmax);
vacc1 = _mm256_min_ps(vacc1, vmax);
_mm_storeu_si128((__m128i*) o0, _mm256_cvtps_ph(vacc0, _MM_FROUND_TO_NEAREST_INT));
o0 += 8;
_mm_storeu_si128((__m128i*) o1, _mm256_cvtps_ph(vacc1, _MM_FROUND_TO_NEAREST_INT));
o1 += 8;
}
if XNN_UNLIKELY(c != 0) {
const __m256 vscale = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
__m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 = (const uint16_t*) ((uintptr_t) i0 + c);
__m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 = (const uint16_t*) ((uintptr_t) i1 + c);
const __m256 vbias = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 16)));
vacc0 = _mm256_fmadd_ps(vacc0, vscale, vbias);
vacc1 = _mm256_fmadd_ps(vacc1, vscale, vbias);
vacc0 = _mm256_max_ps(vacc0, vmin);
vacc1 = _mm256_max_ps(vacc1, vmin);
vacc0 = _mm256_min_ps(vacc0, vmax);
vacc1 = _mm256_min_ps(vacc1, vmax);
__m128i vh0 = _mm256_cvtps_ph(vacc0, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1 = _mm256_cvtps_ph(vacc1, _MM_FROUND_TO_NEAREST_INT);
if (c & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o0, vh0);
_mm_storel_epi64((__m128i*) o1, vh1);
vh0 = _mm_unpackhi_epi64(vh0, vh0);
vh1 = _mm_unpackhi_epi64(vh1, vh1);
o0 += 4;
o1 += 4;
}
if (c & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o0, vh0);
_mm_storeu_si32(o1, vh1);
vh0 = _mm_srli_epi64(vh0, 32);
vh1 = _mm_srli_epi64(vh1, 32);
o0 += 2;
o1 += 2;
}
if (c & (1 * sizeof(uint16_t))) {
*o0 = (uint16_t) _mm_extract_epi16(vh0, 0);
*o1 = (uint16_t) _mm_extract_epi16(vh1, 0);
o0 += 1;
o1 += 1;
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
o0 = (uint16_t*) ((uintptr_t) o0 + output_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
o1 = (uint16_t*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 6,366 | 36.452941 | 104 |
c
No community queries yet
The top public SQL queries from the community will appear here once available.