repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string, 2 classes) |
---|---|---|---|---|---|---|
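The rows below are auto-generated XNNPACK microkernels that convert IEEE half-precision (f16) inputs to single-precision (f32) outputs using integer SIMD bit manipulation plus one floating-point multiply (normalized inputs) or subtract (denormal inputs). As a reading aid, here is a minimal scalar sketch of the per-element computation these kernels perform lane-wise; the concrete constants (0x8000 sign mask, 0x70000000 exponent offset, 0x1.0p-112f exponent scale, 0.5f magic bias, cutoff at the smallest normalized f16) are assumptions inferred from the structure of the kernels, not values read out of the XNNPACK parameter tables.

```c
#include <stdint.h>
#include <string.h>

// Scalar sketch of the f16 -> f32 conversion that the SIMD kernels below
// perform lane-wise. The bit patterns are assumptions, not XNNPACK constants.
static float f16_to_f32_reference(uint16_t h) {
  const uint32_t sign = (uint32_t) (h & UINT16_C(0x8000)) << 16;  // f32 sign bit
  const uint32_t nonsign = h & UINT16_C(0x7FFF);                  // exponent + mantissa

  // Normalized / Inf / NaN path: move exponent+mantissa into f32 position
  // (<< 13), add 224 << 23 to the exponent field, then scale by 2**-112 so
  // the net exponent-bias adjustment is 127 - 15 = 112.
  uint32_t norm_bits = (nonsign << 13) + UINT32_C(0x70000000);
  float norm;
  memcpy(&norm, &norm_bits, sizeof(norm));
  norm *= 0x1.0p-112f;

  // Denormal / zero path: OR the mantissa into the low bits of 0.5f and
  // subtract 0.5f, which yields mantissa * 2**-24 exactly.
  uint32_t denorm_bits = UINT32_C(0x3F000000) | nonsign;
  float denorm;
  memcpy(&denorm, &denorm_bits, sizeof(denorm));
  denorm -= 0.5f;

  // 0x0400 is the smallest normalized f16 bit pattern.
  const float absval = nonsign >= UINT16_C(0x0400) ? norm : denorm;
  uint32_t result_bits;
  memcpy(&result_bits, &absval, sizeof(result_bits));
  result_bits |= sign;
  float result;
  memcpy(&result, &result_bits, sizeof(result));
  return result;
}
```

Each SIMD kernel evaluates both candidate results for every lane and then selects between them with a comparison mask, which is why the inner loops carry `vnorm*`, `vdenorm*`, and `vmask*` values side by side.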
XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-sse41-int16-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/sse-int16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__sse41_int16_x24(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask);
const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset);
const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale);
const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias);
const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
i += 24;
const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);
const __m128i vsign2 = _mm_and_si128(vh2, vsign_mask);
const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);
const __m128i vnonsign2 = _mm_xor_si128(vh2, vsign2);
const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);
const __m128i vprenorm4 = _mm_slli_epi16(vnonsign2, 13);
const __m128i vprenorm5 = _mm_add_epi16(_mm_srli_epi16(vnonsign2, 3), vexp_offset);
const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));
const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm4, vprenorm5)), vexp_scale));
const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm4, vprenorm5)), vexp_scale));
const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);
const __m128i vmask2 = _mm_cmpgt_epi16(vnonsign2, vdenorm_cutoff);
const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
_mm_blendv_epi8(vdenorm0, vnorm0, _mm_cvtepi16_epi32(vmask0)));
const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
_mm_blendv_epi8(vdenorm1, vnorm1, _mm_unpackhi_epi16(vmask0, vmask0)));
const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
_mm_blendv_epi8(vdenorm2, vnorm2, _mm_cvtepi16_epi32(vmask1)));
const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
_mm_blendv_epi8(vdenorm3, vnorm3, _mm_unpackhi_epi16(vmask1, vmask1)));
const __m128i vf4 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign2),
_mm_blendv_epi8(vdenorm4, vnorm4, _mm_cvtepi16_epi32(vmask2)));
const __m128i vf5 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign2),
_mm_blendv_epi8(vdenorm5, vnorm5, _mm_unpackhi_epi16(vmask2, vmask2)));
_mm_storeu_ps(output, _mm_castsi128_ps(vf0));
_mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
_mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
_mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
_mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
_mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
output += 24;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vh = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vsign = _mm_and_si128(vh, vsign_mask);
const __m128i vnonsign = _mm_xor_si128(vh, vsign);
const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
_mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
_mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
output += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const __m128i vh = _mm_loadu_si128((const __m128i*) i);
const __m128i vsign = _mm_and_si128(vh, vsign_mask);
const __m128i vnonsign = _mm_xor_si128(vh, vsign);
const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
__m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
if (batch & (4 * sizeof(uint16_t))) {
_mm_storeu_ps(output, _mm_castsi128_ps(vf));
output += 4;
vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
output += 2;
vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
}
if (batch & (1 * sizeof(uint16_t))) {
_mm_store_ss(output, _mm_castsi128_ps(vf));
}
}
}
| 8,652 | 51.442424 | 134 | c |
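In the SSE4.1 kernels, the comparison against the denormal cutoff is done on 16-bit lanes, but the normal/denormal selection happens per 32-bit lane. The fragment below isolates how the 8x16-bit mask is widened into the two 4x32-bit masks fed to `_mm_blendv_epi8`; it restates operations already present in the kernels, shown in isolation.

```c
#include <smmintrin.h>

// Widen an 8x16-bit comparison mask (0xFFFF / 0x0000 per lane) into two
// 4x32-bit masks so _mm_blendv_epi8 can select whole 32-bit lanes.
static inline void widen_mask16(__m128i mask16, __m128i* mask_lo, __m128i* mask_hi) {
  *mask_lo = _mm_cvtepi16_epi32(mask16);            // sign-extend lanes 0..3
  *mask_hi = _mm_unpackhi_epi16(mask16, mask16);    // duplicate lanes 4..7 into 32-bit lanes
}
```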
XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-sse41-int16-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/sse-int16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__sse41_int16_x32(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask);
const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset);
const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale);
const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias);
const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
const __m128i vh3 = _mm_loadu_si128((const __m128i*) (i + 24));
i += 32;
const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);
const __m128i vsign2 = _mm_and_si128(vh2, vsign_mask);
const __m128i vsign3 = _mm_and_si128(vh3, vsign_mask);
const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);
const __m128i vnonsign2 = _mm_xor_si128(vh2, vsign2);
const __m128i vnonsign3 = _mm_xor_si128(vh3, vsign3);
const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);
const __m128i vprenorm4 = _mm_slli_epi16(vnonsign2, 13);
const __m128i vprenorm5 = _mm_add_epi16(_mm_srli_epi16(vnonsign2, 3), vexp_offset);
const __m128i vprenorm6 = _mm_slli_epi16(vnonsign3, 13);
const __m128i vprenorm7 = _mm_add_epi16(_mm_srli_epi16(vnonsign3, 3), vexp_offset);
const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));
const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm4, vprenorm5)), vexp_scale));
const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm4, vprenorm5)), vexp_scale));
const __m128i vnorm6 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm6, vprenorm7)), vexp_scale));
const __m128i vnorm7 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm6, vprenorm7)), vexp_scale));
const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
const __m128i vdenorm6 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign3, vmagic_mask)), vmagic_bias));
const __m128i vdenorm7 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign3, vmagic_mask)), vmagic_bias));
const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);
const __m128i vmask2 = _mm_cmpgt_epi16(vnonsign2, vdenorm_cutoff);
const __m128i vmask3 = _mm_cmpgt_epi16(vnonsign3, vdenorm_cutoff);
const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
_mm_blendv_epi8(vdenorm0, vnorm0, _mm_cvtepi16_epi32(vmask0)));
const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
_mm_blendv_epi8(vdenorm1, vnorm1, _mm_unpackhi_epi16(vmask0, vmask0)));
const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
_mm_blendv_epi8(vdenorm2, vnorm2, _mm_cvtepi16_epi32(vmask1)));
const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
_mm_blendv_epi8(vdenorm3, vnorm3, _mm_unpackhi_epi16(vmask1, vmask1)));
const __m128i vf4 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign2),
_mm_blendv_epi8(vdenorm4, vnorm4, _mm_cvtepi16_epi32(vmask2)));
const __m128i vf5 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign2),
_mm_blendv_epi8(vdenorm5, vnorm5, _mm_unpackhi_epi16(vmask2, vmask2)));
const __m128i vf6 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign3),
_mm_blendv_epi8(vdenorm6, vnorm6, _mm_cvtepi16_epi32(vmask3)));
const __m128i vf7 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign3),
_mm_blendv_epi8(vdenorm7, vnorm7, _mm_unpackhi_epi16(vmask3, vmask3)));
_mm_storeu_ps(output, _mm_castsi128_ps(vf0));
_mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
_mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
_mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
_mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
_mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
_mm_storeu_ps(output + 24, _mm_castsi128_ps(vf6));
_mm_storeu_ps(output + 28, _mm_castsi128_ps(vf7));
output += 32;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vh = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vsign = _mm_and_si128(vh, vsign_mask);
const __m128i vnonsign = _mm_xor_si128(vh, vsign);
const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
_mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
_mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
output += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const __m128i vh = _mm_loadu_si128((const __m128i*) i);
const __m128i vsign = _mm_and_si128(vh, vsign_mask);
const __m128i vnonsign = _mm_xor_si128(vh, vsign);
const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
__m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
if (batch & (4 * sizeof(uint16_t))) {
_mm_storeu_ps(output, _mm_castsi128_ps(vf));
output += 4;
vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
output += 2;
vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
}
if (batch & (1 * sizeof(uint16_t))) {
_mm_store_ss(output, _mm_castsi128_ps(vf));
}
}
}
| 10,013 | 54.325967 | 134 | c |
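All of these variants share the same tail handling: when 1 to 7 f16 elements remain, a full 16-byte vector is still loaded (the `XNN_OOB_READS` annotation on the signature documents that the kernel may read past the end of the input buffer), and the converted lanes are written out as at most one 4-element, one 2-element, and one 1-element store, shifting the vector between stores. A small sketch of how the `batch & (N * sizeof(uint16_t))` tests decompose the remainder:

```c
#include <stddef.h>
#include <stdint.h>

// Sketch only: decompose a remainder of 1..7 f16 elements into 4/2/1-element
// stores, mirroring the bit tests in the kernels (batch is a byte count).
static void tail_plan(size_t batch /* < 8 * sizeof(uint16_t) */) {
  const size_t elements = batch / sizeof(uint16_t);      // 1..7 elements left
  const int store4 = (elements & 4) != 0;                // batch & (4 * sizeof(uint16_t))
  const int store2 = (elements & 2) != 0;                // batch & (2 * sizeof(uint16_t))
  const int store1 = (elements & 1) != 0;                // batch & (1 * sizeof(uint16_t))
  (void) store4; (void) store2; (void) store1;           // e.g. 7 elements -> 4 + 2 + 1
}
```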
XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-sse41-int16-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/sse-int16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__sse41_int16_x8(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask);
const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset);
const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale);
const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias);
const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m128i vh = _mm_loadu_si128((const __m128i*) i);
i += 8;
const __m128i vsign = _mm_and_si128(vh, vsign_mask);
const __m128i vnonsign = _mm_xor_si128(vh, vsign);
const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
_mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
_mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
output += 8;
}
if XNN_UNPREDICTABLE(batch != 0) {
const __m128i vh = _mm_loadu_si128((const __m128i*) i);
const __m128i vsign = _mm_and_si128(vh, vsign_mask);
const __m128i vnonsign = _mm_xor_si128(vh, vsign);
const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
__m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
if (batch & (4 * sizeof(uint16_t))) {
_mm_storeu_ps(output, _mm_castsi128_ps(vf));
output += 4;
vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
_mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
}
if (batch & (2 * sizeof(uint16_t))) {
_mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
output += 2;
vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
}
if (batch & (1 * sizeof(uint16_t))) {
_mm_store_ss(output, _mm_castsi128_ps(vf));
}
}
}
| 4,465 | 41.533333 | 134 | c |
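The WebAssembly SIMD kernels that follow implement the same algorithm; the 16-bit interleaves are spelled as explicit `wasm_v16x8_shuffle` index patterns instead of `_mm_unpacklo_epi16`/`_mm_unpackhi_epi16`. Isolated for reference:

```c
#include <wasm_simd128.h>

// The two shuffle patterns used throughout the WAsm kernels are the 16-bit
// zip-lo / zip-hi interleaves, i.e. the equivalents of _mm_unpacklo_epi16
// and _mm_unpackhi_epi16.
static inline v128_t zip_lo_u16x8(v128_t a, v128_t b) {
  return wasm_v16x8_shuffle(a, b, 0, 8, 1, 9, 2, 10, 3, 11);
}
static inline v128_t zip_hi_u16x8(v128_t a, v128_t b) {
  return wasm_v16x8_shuffle(a, b, 4, 12, 5, 13, 6, 14, 7, 15);
}
```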
XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmrelaxedsimd-int16-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmrelaxedsimd_int16_x16(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int16.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int16.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int16.exp_scale);
const v128_t vmagic_mask = wasm_v128_load64_splat(params->wasmsimd_int16.magic_mask);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int16.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int16.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const v128_t vh0 = wasm_v128_load(i);
const v128_t vh1 = wasm_v128_load(i + 8);
i += 16;
const v128_t vsign0 = wasm_v128_and(vh0, vsign_mask);
const v128_t vsign1 = wasm_v128_and(vh1, vsign_mask);
const v128_t vnonsign0 = wasm_v128_xor(vh0, vsign0);
const v128_t vnonsign1 = wasm_v128_xor(vh1, vsign1);
const v128_t vprenorm0 = wasm_i16x8_shl(vnonsign0, 13);
const v128_t vprenorm1 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign0, 3), vexp_offset);
const v128_t vprenorm2 = wasm_i16x8_shl(vnonsign1, 13);
const v128_t vprenorm3 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign1, 3), vexp_offset);
const v128_t vnorm0 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm0, vprenorm1, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm1 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm0, vprenorm1, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vnorm2 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm2, vprenorm3, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm3 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm2, vprenorm3, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm0 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign0, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm1 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign0, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vdenorm2 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign1, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm3 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign1, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask0 = wasm_i16x8_gt(vnonsign0, vdenorm_cutoff);
const v128_t vmask1 = wasm_i16x8_gt(vnonsign1, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask0 = wasm_i32x4_extend_low_i16x8(vmask0);
const v128_t vxmask1 = wasm_i32x4_extend_high_i16x8(vmask0);
const v128_t vxmask2 = wasm_i32x4_extend_low_i16x8(vmask1);
const v128_t vxmask3 = wasm_i32x4_extend_high_i16x8(vmask1);
const v128_t vabsf0 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm0, vdenorm0, vxmask0);
const v128_t vsignf0 = wasm_v16x8_shuffle(vzero, vsign0, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf1 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm1, vdenorm1, vxmask1);
const v128_t vsignf1 = wasm_v16x8_shuffle(vzero, vsign0, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vabsf2 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm2, vdenorm2, vxmask2);
const v128_t vsignf2 = wasm_v16x8_shuffle(vzero, vsign1, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf3 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm3, vdenorm3, vxmask3);
const v128_t vsignf3 = wasm_v16x8_shuffle(vzero, vsign1, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vf0 = wasm_v128_or(vsignf0, vabsf0);
const v128_t vf1 = wasm_v128_or(vsignf1, vabsf1);
const v128_t vf2 = wasm_v128_or(vsignf2, vabsf2);
const v128_t vf3 = wasm_v128_or(vsignf3, vabsf3);
wasm_v128_store(output, vf0);
wasm_v128_store(output + 4, vf1);
wasm_v128_store(output + 8, vf2);
wasm_v128_store(output + 12, vf3);
output += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vf_lo = wasm_v128_or(vsignf_lo, vabsf_lo);
const v128_t vf_hi = wasm_v128_or(vsignf_hi, vabsf_hi);
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t vf = wasm_v128_or(vsignf_lo, vabsf_lo);
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
vf = wasm_v128_or(vsignf_hi, vabsf_hi);
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 8,394 | 47.247126 | 129 | c |
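The `__builtin_wasm_relaxed_laneselect_i32x4(a, b, mask)` calls in these relaxed-SIMD kernels pick lanes of `a` where the mask is set and lanes of `b` otherwise. Because the mask is produced by sign-extending a 16-bit comparison, every 32-bit lane is either all ones or all zeros, so the relaxed instruction necessarily agrees with the portable `wasm_v128_bitselect`. A minimal equivalence sketch (assuming a Clang toolchain with relaxed SIMD enabled, which is what the builtin spelling implies):

```c
#include <wasm_simd128.h>

// With an all-ones/all-zeros per-lane mask, relaxed lane selection and the
// portable bitwise select compute the same result; the kernels use the
// relaxed form.
static inline v128_t select_norm_or_denorm(v128_t vnorm, v128_t vdenorm, v128_t vxmask) {
  const v128_t strict = wasm_v128_bitselect(vnorm, vdenorm, vxmask);
  (void) strict;
  return __builtin_wasm_relaxed_laneselect_i32x4(vnorm, vdenorm, vxmask);
}
```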
XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmrelaxedsimd-int16-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmrelaxedsimd_int16_x24(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int16.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int16.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int16.exp_scale);
const v128_t vmagic_mask = wasm_v128_load64_splat(params->wasmsimd_int16.magic_mask);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int16.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int16.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const v128_t vh0 = wasm_v128_load(i);
const v128_t vh1 = wasm_v128_load(i + 8);
const v128_t vh2 = wasm_v128_load(i + 16);
i += 24;
const v128_t vsign0 = wasm_v128_and(vh0, vsign_mask);
const v128_t vsign1 = wasm_v128_and(vh1, vsign_mask);
const v128_t vsign2 = wasm_v128_and(vh2, vsign_mask);
const v128_t vnonsign0 = wasm_v128_xor(vh0, vsign0);
const v128_t vnonsign1 = wasm_v128_xor(vh1, vsign1);
const v128_t vnonsign2 = wasm_v128_xor(vh2, vsign2);
const v128_t vprenorm0 = wasm_i16x8_shl(vnonsign0, 13);
const v128_t vprenorm1 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign0, 3), vexp_offset);
const v128_t vprenorm2 = wasm_i16x8_shl(vnonsign1, 13);
const v128_t vprenorm3 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign1, 3), vexp_offset);
const v128_t vprenorm4 = wasm_i16x8_shl(vnonsign2, 13);
const v128_t vprenorm5 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign2, 3), vexp_offset);
const v128_t vnorm0 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm0, vprenorm1, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm1 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm0, vprenorm1, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vnorm2 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm2, vprenorm3, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm3 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm2, vprenorm3, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vnorm4 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm4, vprenorm5, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm5 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm4, vprenorm5, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm0 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign0, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm1 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign0, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vdenorm2 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign1, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm3 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign1, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vdenorm4 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign2, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm5 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign2, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask0 = wasm_i16x8_gt(vnonsign0, vdenorm_cutoff);
const v128_t vmask1 = wasm_i16x8_gt(vnonsign1, vdenorm_cutoff);
const v128_t vmask2 = wasm_i16x8_gt(vnonsign2, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask0 = wasm_i32x4_extend_low_i16x8(vmask0);
const v128_t vxmask1 = wasm_i32x4_extend_high_i16x8(vmask0);
const v128_t vxmask2 = wasm_i32x4_extend_low_i16x8(vmask1);
const v128_t vxmask3 = wasm_i32x4_extend_high_i16x8(vmask1);
const v128_t vxmask4 = wasm_i32x4_extend_low_i16x8(vmask2);
const v128_t vxmask5 = wasm_i32x4_extend_high_i16x8(vmask2);
const v128_t vabsf0 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm0, vdenorm0, vxmask0);
const v128_t vsignf0 = wasm_v16x8_shuffle(vzero, vsign0, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf1 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm1, vdenorm1, vxmask1);
const v128_t vsignf1 = wasm_v16x8_shuffle(vzero, vsign0, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vabsf2 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm2, vdenorm2, vxmask2);
const v128_t vsignf2 = wasm_v16x8_shuffle(vzero, vsign1, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf3 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm3, vdenorm3, vxmask3);
const v128_t vsignf3 = wasm_v16x8_shuffle(vzero, vsign1, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vabsf4 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm4, vdenorm4, vxmask4);
const v128_t vsignf4 = wasm_v16x8_shuffle(vzero, vsign2, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf5 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm5, vdenorm5, vxmask5);
const v128_t vsignf5 = wasm_v16x8_shuffle(vzero, vsign2, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vf0 = wasm_v128_or(vsignf0, vabsf0);
const v128_t vf1 = wasm_v128_or(vsignf1, vabsf1);
const v128_t vf2 = wasm_v128_or(vsignf2, vabsf2);
const v128_t vf3 = wasm_v128_or(vsignf3, vabsf3);
const v128_t vf4 = wasm_v128_or(vsignf4, vabsf4);
const v128_t vf5 = wasm_v128_or(vsignf5, vabsf5);
wasm_v128_store(output, vf0);
wasm_v128_store(output + 4, vf1);
wasm_v128_store(output + 8, vf2);
wasm_v128_store(output + 12, vf3);
wasm_v128_store(output + 16, vf4);
wasm_v128_store(output + 20, vf5);
output += 24;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vf_lo = wasm_v128_or(vsignf_lo, vabsf_lo);
const v128_t vf_hi = wasm_v128_or(vsignf_hi, vabsf_hi);
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t vf = wasm_v128_or(vsignf_lo, vabsf_lo);
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
vf = wasm_v128_or(vsignf_hi, vabsf_hi);
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 9,961 | 50.350515 | 129 | c |
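A note on how the sign is reattached in the int16 kernels: interleaving a zero vector with the 16-bit sign words (`wasm_v16x8_shuffle(vzero, vsign, ...)` here, `_mm_unpacklo_epi16(_mm_setzero_si128(), vsign)` in the SSE4.1 kernels) places each 16-bit sign mask in the upper half of a 32-bit lane, which is exactly the f32 sign-bit position; OR-ing that with the magnitude completes the conversion. Per lane this is the scalar expression below:

```c
#include <stdint.h>

// Per-lane meaning of the zero/sign interleave followed by the bitwise OR:
// move the f16 sign bit (0x8000) into the f32 sign position and merge it
// with the already-computed magnitude bits.
static inline uint32_t attach_sign(uint16_t sign16 /* h & 0x8000 */, uint32_t abs_bits) {
  return ((uint32_t) sign16 << 16) | abs_bits;
}
```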
XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmrelaxedsimd-int16-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmrelaxedsimd_int16_x32(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int16.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int16.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int16.exp_scale);
const v128_t vmagic_mask = wasm_v128_load64_splat(params->wasmsimd_int16.magic_mask);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int16.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int16.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const v128_t vh0 = wasm_v128_load(i);
const v128_t vh1 = wasm_v128_load(i + 8);
const v128_t vh2 = wasm_v128_load(i + 16);
const v128_t vh3 = wasm_v128_load(i + 24);
i += 32;
const v128_t vsign0 = wasm_v128_and(vh0, vsign_mask);
const v128_t vsign1 = wasm_v128_and(vh1, vsign_mask);
const v128_t vsign2 = wasm_v128_and(vh2, vsign_mask);
const v128_t vsign3 = wasm_v128_and(vh3, vsign_mask);
const v128_t vnonsign0 = wasm_v128_xor(vh0, vsign0);
const v128_t vnonsign1 = wasm_v128_xor(vh1, vsign1);
const v128_t vnonsign2 = wasm_v128_xor(vh2, vsign2);
const v128_t vnonsign3 = wasm_v128_xor(vh3, vsign3);
const v128_t vprenorm0 = wasm_i16x8_shl(vnonsign0, 13);
const v128_t vprenorm1 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign0, 3), vexp_offset);
const v128_t vprenorm2 = wasm_i16x8_shl(vnonsign1, 13);
const v128_t vprenorm3 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign1, 3), vexp_offset);
const v128_t vprenorm4 = wasm_i16x8_shl(vnonsign2, 13);
const v128_t vprenorm5 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign2, 3), vexp_offset);
const v128_t vprenorm6 = wasm_i16x8_shl(vnonsign3, 13);
const v128_t vprenorm7 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign3, 3), vexp_offset);
const v128_t vnorm0 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm0, vprenorm1, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm1 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm0, vprenorm1, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vnorm2 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm2, vprenorm3, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm3 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm2, vprenorm3, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vnorm4 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm4, vprenorm5, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm5 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm4, vprenorm5, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vnorm6 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm6, vprenorm7, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm7 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm6, vprenorm7, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm0 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign0, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm1 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign0, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vdenorm2 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign1, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm3 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign1, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vdenorm4 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign2, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm5 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign2, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vdenorm6 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign3, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm7 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign3, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask0 = wasm_i16x8_gt(vnonsign0, vdenorm_cutoff);
const v128_t vmask1 = wasm_i16x8_gt(vnonsign1, vdenorm_cutoff);
const v128_t vmask2 = wasm_i16x8_gt(vnonsign2, vdenorm_cutoff);
const v128_t vmask3 = wasm_i16x8_gt(vnonsign3, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask0 = wasm_i32x4_extend_low_i16x8(vmask0);
const v128_t vxmask1 = wasm_i32x4_extend_high_i16x8(vmask0);
const v128_t vxmask2 = wasm_i32x4_extend_low_i16x8(vmask1);
const v128_t vxmask3 = wasm_i32x4_extend_high_i16x8(vmask1);
const v128_t vxmask4 = wasm_i32x4_extend_low_i16x8(vmask2);
const v128_t vxmask5 = wasm_i32x4_extend_high_i16x8(vmask2);
const v128_t vxmask6 = wasm_i32x4_extend_low_i16x8(vmask3);
const v128_t vxmask7 = wasm_i32x4_extend_high_i16x8(vmask3);
const v128_t vabsf0 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm0, vdenorm0, vxmask0);
const v128_t vsignf0 = wasm_v16x8_shuffle(vzero, vsign0, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf1 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm1, vdenorm1, vxmask1);
const v128_t vsignf1 = wasm_v16x8_shuffle(vzero, vsign0, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vabsf2 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm2, vdenorm2, vxmask2);
const v128_t vsignf2 = wasm_v16x8_shuffle(vzero, vsign1, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf3 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm3, vdenorm3, vxmask3);
const v128_t vsignf3 = wasm_v16x8_shuffle(vzero, vsign1, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vabsf4 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm4, vdenorm4, vxmask4);
const v128_t vsignf4 = wasm_v16x8_shuffle(vzero, vsign2, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf5 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm5, vdenorm5, vxmask5);
const v128_t vsignf5 = wasm_v16x8_shuffle(vzero, vsign2, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vabsf6 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm6, vdenorm6, vxmask6);
const v128_t vsignf6 = wasm_v16x8_shuffle(vzero, vsign3, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf7 = __builtin_wasm_relaxed_laneselect_i32x4(vnorm7, vdenorm7, vxmask7);
const v128_t vsignf7 = wasm_v16x8_shuffle(vzero, vsign3, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vf0 = wasm_v128_or(vsignf0, vabsf0);
const v128_t vf1 = wasm_v128_or(vsignf1, vabsf1);
const v128_t vf2 = wasm_v128_or(vsignf2, vabsf2);
const v128_t vf3 = wasm_v128_or(vsignf3, vabsf3);
const v128_t vf4 = wasm_v128_or(vsignf4, vabsf4);
const v128_t vf5 = wasm_v128_or(vsignf5, vabsf5);
const v128_t vf6 = wasm_v128_or(vsignf6, vabsf6);
const v128_t vf7 = wasm_v128_or(vsignf7, vabsf7);
wasm_v128_store(output, vf0);
wasm_v128_store(output + 4, vf1);
wasm_v128_store(output + 8, vf2);
wasm_v128_store(output + 12, vf3);
wasm_v128_store(output + 16, vf4);
wasm_v128_store(output + 20, vf5);
wasm_v128_store(output + 24, vf6);
wasm_v128_store(output + 28, vf7);
output += 32;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vf_lo = wasm_v128_or(vsignf_lo, vabsf_lo);
const v128_t vf_hi = wasm_v128_or(vsignf_hi, vabsf_hi);
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t vf = wasm_v128_or(vsignf_lo, vabsf_lo);
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
vf = wasm_v128_or(vsignf_hi, vabsf_hi);
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 11,528 | 52.873832 | 129 | c |
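For orientation, a hypothetical call site for one of these microkernels; `batch` is a byte count of f16 input, as the `batch % sizeof(uint16_t) == 0` assertion requires, and the params union is assumed to have been initialized elsewhere in XNNPACK (the initialization helper is not shown in these files):

```c
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/vcvt.h>

// Hypothetical driver: convert n f16 values to f32 with a single ukernel call.
static void convert_f16_to_f32(const uint16_t* src, float* dst, size_t n,
                               const union xnn_f16_f32_cvt_params* params) {
  xnn_f16_f32_vcvt_ukernel__wasmrelaxedsimd_int16_x32(
      n * sizeof(uint16_t),  // batch is expressed in bytes of f16 input
      src, dst, params);
}
```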
XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmrelaxedsimd-int16-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmrelaxedsimd_int16_x8(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int16.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int16.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int16.exp_scale);
const v128_t vmagic_mask = wasm_v128_load64_splat(params->wasmsimd_int16.magic_mask);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int16.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int16.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vf_lo = wasm_v128_or(vsignf_lo, vabsf_lo);
const v128_t vf_hi = wasm_v128_or(vsignf_hi, vabsf_hi);
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = __builtin_wasm_relaxed_laneselect_i32x4(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t vf = wasm_v128_or(vsignf_lo, vabsf_lo);
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
vf = wasm_v128_or(vsignf_hi, vabsf_hi);
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 5,100 | 41.865546 | 129 | c |
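The final (truncated) row switches to the `wasmsimd-int32` template: each half word is widened to a 32-bit lane up front by interleaving with zeros, so the lane holds `h << 16`, and the exponent/mantissa arithmetic is done in 32-bit lanes. Since `(h << 16) >> 3 == h << 13`, the normalized path produces the same bits as the int16 variants; a one-line sketch under the same assumed constants as the scalar reference near the top:

```c
#include <stdint.h>

// Normalized-path bits in the int32 variant: identical to the int16 variants,
// just computed from the pre-widened lane w = (uint32_t) h << 16. The result
// is then reinterpreted as float and scaled by 2**-112, as in the kernels.
static inline uint32_t int32_variant_norm_bits(uint16_t h) {
  const uint32_t w = (uint32_t) (h & UINT16_C(0x7FFF)) << 16;  // non-sign bits, widened
  return (w >> 3) + UINT32_C(0x70000000);
}
```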
XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmrelaxedsimd-int32-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int32.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmrelaxedsimd_int32_x16(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int32.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int32.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int32.exp_scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int32.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int32.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const v128_t vh0 = wasm_v128_load(i);
const v128_t vh1 = wasm_v128_load(i + 8);
i += 16;
const v128_t vzero = wasm_i16x8_const_splat(0);
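    // Interleaving zero halfwords with the inputs places each f16 bit pattern in
    // the upper 16 bits of a 32-bit lane.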
const v128_t vw0 = wasm_v16x8_shuffle(vzero, vh0, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw1 = wasm_v16x8_shuffle(vzero, vh0, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vw2 = wasm_v16x8_shuffle(vzero, vh1, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw3 = wasm_v16x8_shuffle(vzero, vh1, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign0 = wasm_v128_and(vw0, vsign_mask);
const v128_t vsign1 = wasm_v128_and(vw1, vsign_mask);
const v128_t vsign2 = wasm_v128_and(vw2, vsign_mask);
const v128_t vsign3 = wasm_v128_and(vw3, vsign_mask);
const v128_t vnonsign0 = wasm_v128_xor(vw0, vsign0);
const v128_t vnonsign1 = wasm_v128_xor(vw1, vsign1);
const v128_t vnonsign2 = wasm_v128_xor(vw2, vsign2);
const v128_t vnonsign3 = wasm_v128_xor(vw3, vsign3);
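    // Normalized path: a 3-bit right shift aligns the f16 exponent/mantissa with the
    // f32 layout; adding exp_offset and then scaling by exp_scale rebiases the
    // exponent in two steps, which keeps infinities and NaNs mapping to infinities
    // and NaNs.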
const v128_t vnorm0 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign0, 3), vexp_offset), vexp_scale);
const v128_t vnorm1 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign1, 3), vexp_offset), vexp_scale);
const v128_t vnorm2 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign2, 3), vexp_offset), vexp_scale);
const v128_t vnorm3 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign3, 3), vexp_offset), vexp_scale);
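    // Denormal path: move the bits back to the low half, OR in the magic bias and
    // subtract it again, which reinterprets the mantissa as a small float and
    // recovers the denormal value exactly.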
const v128_t vdenorm0 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign0, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm1 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign1, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm2 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign2, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm3 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign3, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask0 = wasm_i32x4_gt(vnonsign0, vdenorm_cutoff);
const v128_t vxmask1 = wasm_i32x4_gt(vnonsign1, vdenorm_cutoff);
const v128_t vxmask2 = wasm_i32x4_gt(vnonsign2, vdenorm_cutoff);
const v128_t vxmask3 = wasm_i32x4_gt(vnonsign3, vdenorm_cutoff);
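    // Pick the normalized result where the magnitude exceeds the denormal cutoff,
    // the magic-bias result otherwise, and reattach the sign bit.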
const v128_t vf0 = wasm_v128_or(vsign0, wasm_v128_bitselect(vnorm0, vdenorm0, vxmask0));
const v128_t vf1 = wasm_v128_or(vsign1, wasm_v128_bitselect(vnorm1, vdenorm1, vxmask1));
const v128_t vf2 = wasm_v128_or(vsign2, wasm_v128_bitselect(vnorm2, vdenorm2, vxmask2));
const v128_t vf3 = wasm_v128_or(vsign3, wasm_v128_bitselect(vnorm3, vdenorm3, vxmask3));
wasm_v128_store(output, vf0);
wasm_v128_store(output + 4, vf1);
wasm_v128_store(output + 8, vf2);
wasm_v128_store(output + 12, vf3);
output += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
const v128_t vf_lo = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
const v128_t vf_hi = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
v128_t vf = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
vf = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 7,287 | 46.324675 | 118 | c |
| XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmrelaxedsimd-int32-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int32.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmrelaxedsimd_int32_x24(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int32.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int32.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int32.exp_scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int32.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int32.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
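  // Main loop handles 24 halfwords (three vectors) per iteration; an 8-element loop
  // and a 1-7 element tail mop up the remainder.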
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const v128_t vh0 = wasm_v128_load(i);
const v128_t vh1 = wasm_v128_load(i + 8);
const v128_t vh2 = wasm_v128_load(i + 16);
i += 24;
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw0 = wasm_v16x8_shuffle(vzero, vh0, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw1 = wasm_v16x8_shuffle(vzero, vh0, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vw2 = wasm_v16x8_shuffle(vzero, vh1, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw3 = wasm_v16x8_shuffle(vzero, vh1, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vw4 = wasm_v16x8_shuffle(vzero, vh2, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw5 = wasm_v16x8_shuffle(vzero, vh2, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign0 = wasm_v128_and(vw0, vsign_mask);
const v128_t vsign1 = wasm_v128_and(vw1, vsign_mask);
const v128_t vsign2 = wasm_v128_and(vw2, vsign_mask);
const v128_t vsign3 = wasm_v128_and(vw3, vsign_mask);
const v128_t vsign4 = wasm_v128_and(vw4, vsign_mask);
const v128_t vsign5 = wasm_v128_and(vw5, vsign_mask);
const v128_t vnonsign0 = wasm_v128_xor(vw0, vsign0);
const v128_t vnonsign1 = wasm_v128_xor(vw1, vsign1);
const v128_t vnonsign2 = wasm_v128_xor(vw2, vsign2);
const v128_t vnonsign3 = wasm_v128_xor(vw3, vsign3);
const v128_t vnonsign4 = wasm_v128_xor(vw4, vsign4);
const v128_t vnonsign5 = wasm_v128_xor(vw5, vsign5);
const v128_t vnorm0 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign0, 3), vexp_offset), vexp_scale);
const v128_t vnorm1 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign1, 3), vexp_offset), vexp_scale);
const v128_t vnorm2 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign2, 3), vexp_offset), vexp_scale);
const v128_t vnorm3 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign3, 3), vexp_offset), vexp_scale);
const v128_t vnorm4 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign4, 3), vexp_offset), vexp_scale);
const v128_t vnorm5 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign5, 3), vexp_offset), vexp_scale);
const v128_t vdenorm0 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign0, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm1 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign1, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm2 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign2, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm3 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign3, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm4 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign4, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm5 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign5, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask0 = wasm_i32x4_gt(vnonsign0, vdenorm_cutoff);
const v128_t vxmask1 = wasm_i32x4_gt(vnonsign1, vdenorm_cutoff);
const v128_t vxmask2 = wasm_i32x4_gt(vnonsign2, vdenorm_cutoff);
const v128_t vxmask3 = wasm_i32x4_gt(vnonsign3, vdenorm_cutoff);
const v128_t vxmask4 = wasm_i32x4_gt(vnonsign4, vdenorm_cutoff);
const v128_t vxmask5 = wasm_i32x4_gt(vnonsign5, vdenorm_cutoff);
const v128_t vf0 = wasm_v128_or(vsign0, wasm_v128_bitselect(vnorm0, vdenorm0, vxmask0));
const v128_t vf1 = wasm_v128_or(vsign1, wasm_v128_bitselect(vnorm1, vdenorm1, vxmask1));
const v128_t vf2 = wasm_v128_or(vsign2, wasm_v128_bitselect(vnorm2, vdenorm2, vxmask2));
const v128_t vf3 = wasm_v128_or(vsign3, wasm_v128_bitselect(vnorm3, vdenorm3, vxmask3));
const v128_t vf4 = wasm_v128_or(vsign4, wasm_v128_bitselect(vnorm4, vdenorm4, vxmask4));
const v128_t vf5 = wasm_v128_or(vsign5, wasm_v128_bitselect(vnorm5, vdenorm5, vxmask5));
wasm_v128_store(output, vf0);
wasm_v128_store(output + 4, vf1);
wasm_v128_store(output + 8, vf2);
wasm_v128_store(output + 12, vf3);
wasm_v128_store(output + 16, vf4);
wasm_v128_store(output + 20, vf5);
output += 24;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
const v128_t vf_lo = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
const v128_t vf_hi = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
v128_t vf = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
vf = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 8,588 | 49.22807 | 118 | c |
| XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmrelaxedsimd-int32-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int32.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmrelaxedsimd_int32_x32(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int32.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int32.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int32.exp_scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int32.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int32.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const v128_t vh0 = wasm_v128_load(i);
const v128_t vh1 = wasm_v128_load(i + 8);
const v128_t vh2 = wasm_v128_load(i + 16);
const v128_t vh3 = wasm_v128_load(i + 24);
i += 32;
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw0 = wasm_v16x8_shuffle(vzero, vh0, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw1 = wasm_v16x8_shuffle(vzero, vh0, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vw2 = wasm_v16x8_shuffle(vzero, vh1, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw3 = wasm_v16x8_shuffle(vzero, vh1, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vw4 = wasm_v16x8_shuffle(vzero, vh2, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw5 = wasm_v16x8_shuffle(vzero, vh2, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vw6 = wasm_v16x8_shuffle(vzero, vh3, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw7 = wasm_v16x8_shuffle(vzero, vh3, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign0 = wasm_v128_and(vw0, vsign_mask);
const v128_t vsign1 = wasm_v128_and(vw1, vsign_mask);
const v128_t vsign2 = wasm_v128_and(vw2, vsign_mask);
const v128_t vsign3 = wasm_v128_and(vw3, vsign_mask);
const v128_t vsign4 = wasm_v128_and(vw4, vsign_mask);
const v128_t vsign5 = wasm_v128_and(vw5, vsign_mask);
const v128_t vsign6 = wasm_v128_and(vw6, vsign_mask);
const v128_t vsign7 = wasm_v128_and(vw7, vsign_mask);
const v128_t vnonsign0 = wasm_v128_xor(vw0, vsign0);
const v128_t vnonsign1 = wasm_v128_xor(vw1, vsign1);
const v128_t vnonsign2 = wasm_v128_xor(vw2, vsign2);
const v128_t vnonsign3 = wasm_v128_xor(vw3, vsign3);
const v128_t vnonsign4 = wasm_v128_xor(vw4, vsign4);
const v128_t vnonsign5 = wasm_v128_xor(vw5, vsign5);
const v128_t vnonsign6 = wasm_v128_xor(vw6, vsign6);
const v128_t vnonsign7 = wasm_v128_xor(vw7, vsign7);
const v128_t vnorm0 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign0, 3), vexp_offset), vexp_scale);
const v128_t vnorm1 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign1, 3), vexp_offset), vexp_scale);
const v128_t vnorm2 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign2, 3), vexp_offset), vexp_scale);
const v128_t vnorm3 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign3, 3), vexp_offset), vexp_scale);
const v128_t vnorm4 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign4, 3), vexp_offset), vexp_scale);
const v128_t vnorm5 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign5, 3), vexp_offset), vexp_scale);
const v128_t vnorm6 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign6, 3), vexp_offset), vexp_scale);
const v128_t vnorm7 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign7, 3), vexp_offset), vexp_scale);
const v128_t vdenorm0 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign0, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm1 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign1, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm2 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign2, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm3 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign3, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm4 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign4, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm5 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign5, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm6 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign6, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm7 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign7, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask0 = wasm_i32x4_gt(vnonsign0, vdenorm_cutoff);
const v128_t vxmask1 = wasm_i32x4_gt(vnonsign1, vdenorm_cutoff);
const v128_t vxmask2 = wasm_i32x4_gt(vnonsign2, vdenorm_cutoff);
const v128_t vxmask3 = wasm_i32x4_gt(vnonsign3, vdenorm_cutoff);
const v128_t vxmask4 = wasm_i32x4_gt(vnonsign4, vdenorm_cutoff);
const v128_t vxmask5 = wasm_i32x4_gt(vnonsign5, vdenorm_cutoff);
const v128_t vxmask6 = wasm_i32x4_gt(vnonsign6, vdenorm_cutoff);
const v128_t vxmask7 = wasm_i32x4_gt(vnonsign7, vdenorm_cutoff);
const v128_t vf0 = wasm_v128_or(vsign0, wasm_v128_bitselect(vnorm0, vdenorm0, vxmask0));
const v128_t vf1 = wasm_v128_or(vsign1, wasm_v128_bitselect(vnorm1, vdenorm1, vxmask1));
const v128_t vf2 = wasm_v128_or(vsign2, wasm_v128_bitselect(vnorm2, vdenorm2, vxmask2));
const v128_t vf3 = wasm_v128_or(vsign3, wasm_v128_bitselect(vnorm3, vdenorm3, vxmask3));
const v128_t vf4 = wasm_v128_or(vsign4, wasm_v128_bitselect(vnorm4, vdenorm4, vxmask4));
const v128_t vf5 = wasm_v128_or(vsign5, wasm_v128_bitselect(vnorm5, vdenorm5, vxmask5));
const v128_t vf6 = wasm_v128_or(vsign6, wasm_v128_bitselect(vnorm6, vdenorm6, vxmask6));
const v128_t vf7 = wasm_v128_or(vsign7, wasm_v128_bitselect(vnorm7, vdenorm7, vxmask7));
wasm_v128_store(output, vf0);
wasm_v128_store(output + 4, vf1);
wasm_v128_store(output + 8, vf2);
wasm_v128_store(output + 12, vf3);
wasm_v128_store(output + 16, vf4);
wasm_v128_store(output + 20, vf5);
wasm_v128_store(output + 24, vf6);
wasm_v128_store(output + 28, vf7);
output += 32;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
const v128_t vf_lo = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
const v128_t vf_hi = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
v128_t vf = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
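    // Only the low group of 4 results is built eagerly; the high group's selection
    // is deferred until at least 4 elements are known to remain.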
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
vf = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 9,889 | 51.606383 | 118 | c |
| XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmrelaxedsimd-int32-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int32.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmrelaxedsimd_int32_x8(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
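  // Same int32-based conversion as the wider variants, with a single 8-element
  // main loop and a 1-7 element tail.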
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int32.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int32.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int32.exp_scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int32.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int32.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
const v128_t vf_lo = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
const v128_t vf_hi = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
v128_t vf = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
vf = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 4,527 | 41.317757 | 118 | c |
| XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmsimd-int16-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmsimd_int16_x16(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int16.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int16.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int16.exp_scale);
const v128_t vmagic_mask = wasm_v128_load64_splat(params->wasmsimd_int16.magic_mask);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int16.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int16.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const v128_t vh0 = wasm_v128_load(i);
const v128_t vh1 = wasm_v128_load(i + 8);
i += 16;
const v128_t vsign0 = wasm_v128_and(vh0, vsign_mask);
const v128_t vsign1 = wasm_v128_and(vh1, vsign_mask);
const v128_t vnonsign0 = wasm_v128_xor(vh0, vsign0);
const v128_t vnonsign1 = wasm_v128_xor(vh1, vsign1);
const v128_t vprenorm0 = wasm_i16x8_shl(vnonsign0, 13);
const v128_t vprenorm1 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign0, 3), vexp_offset);
const v128_t vprenorm2 = wasm_i16x8_shl(vnonsign1, 13);
const v128_t vprenorm3 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign1, 3), vexp_offset);
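    // Same realignment as a 32-bit right shift by 3, done with 16-bit ops: the low
    // halfword takes the mantissa bits shifted left by 13, the high halfword takes
    // the remaining bits shifted right by 3 plus the exponent offset; interleaving
    // them and scaling by exp_scale gives the normalized result.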
const v128_t vnorm0 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm0, vprenorm1, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm1 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm0, vprenorm1, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vnorm2 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm2, vprenorm3, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm3 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm2, vprenorm3, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
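    // Denormal path: pair each magnitude halfword with the magic mask (as the upper
    // halfword) to build a float with a fixed exponent, then subtract the magic bias
    // to recover the denormal value.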
const v128_t vdenorm0 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign0, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm1 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign0, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vdenorm2 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign1, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm3 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign1, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask0 = wasm_i16x8_gt(vnonsign0, vdenorm_cutoff);
const v128_t vmask1 = wasm_i16x8_gt(vnonsign1, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
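    // The 16-bit comparison masks are sign-extended to 32-bit lanes so they can
    // drive the per-lane selections below.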
const v128_t vxmask0 = wasm_i32x4_extend_low_i16x8(vmask0);
const v128_t vxmask1 = wasm_i32x4_extend_high_i16x8(vmask0);
const v128_t vxmask2 = wasm_i32x4_extend_low_i16x8(vmask1);
const v128_t vxmask3 = wasm_i32x4_extend_high_i16x8(vmask1);
const v128_t vabsf0 = wasm_v128_bitselect(vnorm0, vdenorm0, vxmask0);
const v128_t vsignf0 = wasm_v16x8_shuffle(vzero, vsign0, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf1 = wasm_v128_bitselect(vnorm1, vdenorm1, vxmask1);
const v128_t vsignf1 = wasm_v16x8_shuffle(vzero, vsign0, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vabsf2 = wasm_v128_bitselect(vnorm2, vdenorm2, vxmask2);
const v128_t vsignf2 = wasm_v16x8_shuffle(vzero, vsign1, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf3 = wasm_v128_bitselect(vnorm3, vdenorm3, vxmask3);
const v128_t vsignf3 = wasm_v16x8_shuffle(vzero, vsign1, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vf0 = wasm_v128_or(vsignf0, vabsf0);
const v128_t vf1 = wasm_v128_or(vsignf1, vabsf1);
const v128_t vf2 = wasm_v128_or(vsignf2, vabsf2);
const v128_t vf3 = wasm_v128_or(vsignf3, vabsf3);
wasm_v128_store(output, vf0);
wasm_v128_store(output + 4, vf1);
wasm_v128_store(output + 8, vf2);
wasm_v128_store(output + 12, vf3);
output += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vf_lo = wasm_v128_or(vsignf_lo, vabsf_lo);
const v128_t vf_hi = wasm_v128_or(vsignf_hi, vabsf_hi);
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t vf = wasm_v128_or(vsignf_lo, vabsf_lo);
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
vf = wasm_v128_or(vsignf_hi, vabsf_hi);
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 8,227 | 46.287356 | 129 | c |
| XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmsimd-int16-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmsimd_int16_x24(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int16.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int16.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int16.exp_scale);
const v128_t vmagic_mask = wasm_v128_load64_splat(params->wasmsimd_int16.magic_mask);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int16.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int16.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const v128_t vh0 = wasm_v128_load(i);
const v128_t vh1 = wasm_v128_load(i + 8);
const v128_t vh2 = wasm_v128_load(i + 16);
i += 24;
const v128_t vsign0 = wasm_v128_and(vh0, vsign_mask);
const v128_t vsign1 = wasm_v128_and(vh1, vsign_mask);
const v128_t vsign2 = wasm_v128_and(vh2, vsign_mask);
const v128_t vnonsign0 = wasm_v128_xor(vh0, vsign0);
const v128_t vnonsign1 = wasm_v128_xor(vh1, vsign1);
const v128_t vnonsign2 = wasm_v128_xor(vh2, vsign2);
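    // XOR with the extracted sign bits clears the sign, leaving only the magnitude
    // for both conversion paths.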
const v128_t vprenorm0 = wasm_i16x8_shl(vnonsign0, 13);
const v128_t vprenorm1 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign0, 3), vexp_offset);
const v128_t vprenorm2 = wasm_i16x8_shl(vnonsign1, 13);
const v128_t vprenorm3 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign1, 3), vexp_offset);
const v128_t vprenorm4 = wasm_i16x8_shl(vnonsign2, 13);
const v128_t vprenorm5 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign2, 3), vexp_offset);
const v128_t vnorm0 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm0, vprenorm1, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm1 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm0, vprenorm1, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vnorm2 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm2, vprenorm3, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm3 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm2, vprenorm3, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vnorm4 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm4, vprenorm5, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm5 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm4, vprenorm5, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm0 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign0, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm1 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign0, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vdenorm2 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign1, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm3 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign1, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vdenorm4 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign2, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm5 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign2, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask0 = wasm_i16x8_gt(vnonsign0, vdenorm_cutoff);
const v128_t vmask1 = wasm_i16x8_gt(vnonsign1, vdenorm_cutoff);
const v128_t vmask2 = wasm_i16x8_gt(vnonsign2, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask0 = wasm_i32x4_extend_low_i16x8(vmask0);
const v128_t vxmask1 = wasm_i32x4_extend_high_i16x8(vmask0);
const v128_t vxmask2 = wasm_i32x4_extend_low_i16x8(vmask1);
const v128_t vxmask3 = wasm_i32x4_extend_high_i16x8(vmask1);
const v128_t vxmask4 = wasm_i32x4_extend_low_i16x8(vmask2);
const v128_t vxmask5 = wasm_i32x4_extend_high_i16x8(vmask2);
const v128_t vabsf0 = wasm_v128_bitselect(vnorm0, vdenorm0, vxmask0);
const v128_t vsignf0 = wasm_v16x8_shuffle(vzero, vsign0, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf1 = wasm_v128_bitselect(vnorm1, vdenorm1, vxmask1);
const v128_t vsignf1 = wasm_v16x8_shuffle(vzero, vsign0, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vabsf2 = wasm_v128_bitselect(vnorm2, vdenorm2, vxmask2);
const v128_t vsignf2 = wasm_v16x8_shuffle(vzero, vsign1, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf3 = wasm_v128_bitselect(vnorm3, vdenorm3, vxmask3);
const v128_t vsignf3 = wasm_v16x8_shuffle(vzero, vsign1, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vabsf4 = wasm_v128_bitselect(vnorm4, vdenorm4, vxmask4);
const v128_t vsignf4 = wasm_v16x8_shuffle(vzero, vsign2, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf5 = wasm_v128_bitselect(vnorm5, vdenorm5, vxmask5);
const v128_t vsignf5 = wasm_v16x8_shuffle(vzero, vsign2, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vf0 = wasm_v128_or(vsignf0, vabsf0);
const v128_t vf1 = wasm_v128_or(vsignf1, vabsf1);
const v128_t vf2 = wasm_v128_or(vsignf2, vabsf2);
const v128_t vf3 = wasm_v128_or(vsignf3, vabsf3);
const v128_t vf4 = wasm_v128_or(vsignf4, vabsf4);
const v128_t vf5 = wasm_v128_or(vsignf5, vabsf5);
wasm_v128_store(output, vf0);
wasm_v128_store(output + 4, vf1);
wasm_v128_store(output + 8, vf2);
wasm_v128_store(output + 12, vf3);
wasm_v128_store(output + 16, vf4);
wasm_v128_store(output + 20, vf5);
output += 24;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vf_lo = wasm_v128_or(vsignf_lo, vabsf_lo);
const v128_t vf_hi = wasm_v128_or(vsignf_hi, vabsf_hi);
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t vf = wasm_v128_or(vsignf_lo, vabsf_lo);
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
vf = wasm_v128_or(vsignf_hi, vabsf_hi);
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 9,754 | 49.283505 | 129 | c |
| XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmsimd-int16-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmsimd_int16_x32(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int16.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int16.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int16.exp_scale);
const v128_t vmagic_mask = wasm_v128_load64_splat(params->wasmsimd_int16.magic_mask);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int16.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int16.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const v128_t vh0 = wasm_v128_load(i);
const v128_t vh1 = wasm_v128_load(i + 8);
const v128_t vh2 = wasm_v128_load(i + 16);
const v128_t vh3 = wasm_v128_load(i + 24);
i += 32;
const v128_t vsign0 = wasm_v128_and(vh0, vsign_mask);
const v128_t vsign1 = wasm_v128_and(vh1, vsign_mask);
const v128_t vsign2 = wasm_v128_and(vh2, vsign_mask);
const v128_t vsign3 = wasm_v128_and(vh3, vsign_mask);
const v128_t vnonsign0 = wasm_v128_xor(vh0, vsign0);
const v128_t vnonsign1 = wasm_v128_xor(vh1, vsign1);
const v128_t vnonsign2 = wasm_v128_xor(vh2, vsign2);
const v128_t vnonsign3 = wasm_v128_xor(vh3, vsign3);
const v128_t vprenorm0 = wasm_i16x8_shl(vnonsign0, 13);
const v128_t vprenorm1 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign0, 3), vexp_offset);
const v128_t vprenorm2 = wasm_i16x8_shl(vnonsign1, 13);
const v128_t vprenorm3 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign1, 3), vexp_offset);
const v128_t vprenorm4 = wasm_i16x8_shl(vnonsign2, 13);
const v128_t vprenorm5 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign2, 3), vexp_offset);
const v128_t vprenorm6 = wasm_i16x8_shl(vnonsign3, 13);
const v128_t vprenorm7 = wasm_i16x8_add(wasm_u16x8_shr(vnonsign3, 3), vexp_offset);
const v128_t vnorm0 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm0, vprenorm1, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm1 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm0, vprenorm1, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vnorm2 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm2, vprenorm3, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm3 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm2, vprenorm3, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vnorm4 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm4, vprenorm5, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm5 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm4, vprenorm5, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vnorm6 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm6, vprenorm7, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm7 = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm6, vprenorm7, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm0 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign0, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm1 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign0, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vdenorm2 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign1, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm3 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign1, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vdenorm4 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign2, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm5 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign2, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vdenorm6 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign3, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm7 = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign3, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask0 = wasm_i16x8_gt(vnonsign0, vdenorm_cutoff);
const v128_t vmask1 = wasm_i16x8_gt(vnonsign1, vdenorm_cutoff);
const v128_t vmask2 = wasm_i16x8_gt(vnonsign2, vdenorm_cutoff);
const v128_t vmask3 = wasm_i16x8_gt(vnonsign3, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask0 = wasm_i32x4_extend_low_i16x8(vmask0);
const v128_t vxmask1 = wasm_i32x4_extend_high_i16x8(vmask0);
const v128_t vxmask2 = wasm_i32x4_extend_low_i16x8(vmask1);
const v128_t vxmask3 = wasm_i32x4_extend_high_i16x8(vmask1);
const v128_t vxmask4 = wasm_i32x4_extend_low_i16x8(vmask2);
const v128_t vxmask5 = wasm_i32x4_extend_high_i16x8(vmask2);
const v128_t vxmask6 = wasm_i32x4_extend_low_i16x8(vmask3);
const v128_t vxmask7 = wasm_i32x4_extend_high_i16x8(vmask3);
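    // Interleaving zero with the 16-bit sign words places each sign bit at bit 31
    // of the corresponding 32-bit lane, ready to be ORed back in.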
const v128_t vabsf0 = wasm_v128_bitselect(vnorm0, vdenorm0, vxmask0);
const v128_t vsignf0 = wasm_v16x8_shuffle(vzero, vsign0, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf1 = wasm_v128_bitselect(vnorm1, vdenorm1, vxmask1);
const v128_t vsignf1 = wasm_v16x8_shuffle(vzero, vsign0, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vabsf2 = wasm_v128_bitselect(vnorm2, vdenorm2, vxmask2);
const v128_t vsignf2 = wasm_v16x8_shuffle(vzero, vsign1, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf3 = wasm_v128_bitselect(vnorm3, vdenorm3, vxmask3);
const v128_t vsignf3 = wasm_v16x8_shuffle(vzero, vsign1, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vabsf4 = wasm_v128_bitselect(vnorm4, vdenorm4, vxmask4);
const v128_t vsignf4 = wasm_v16x8_shuffle(vzero, vsign2, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf5 = wasm_v128_bitselect(vnorm5, vdenorm5, vxmask5);
const v128_t vsignf5 = wasm_v16x8_shuffle(vzero, vsign2, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vabsf6 = wasm_v128_bitselect(vnorm6, vdenorm6, vxmask6);
const v128_t vsignf6 = wasm_v16x8_shuffle(vzero, vsign3, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf7 = wasm_v128_bitselect(vnorm7, vdenorm7, vxmask7);
const v128_t vsignf7 = wasm_v16x8_shuffle(vzero, vsign3, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vf0 = wasm_v128_or(vsignf0, vabsf0);
const v128_t vf1 = wasm_v128_or(vsignf1, vabsf1);
const v128_t vf2 = wasm_v128_or(vsignf2, vabsf2);
const v128_t vf3 = wasm_v128_or(vsignf3, vabsf3);
const v128_t vf4 = wasm_v128_or(vsignf4, vabsf4);
const v128_t vf5 = wasm_v128_or(vsignf5, vabsf5);
const v128_t vf6 = wasm_v128_or(vsignf6, vabsf6);
const v128_t vf7 = wasm_v128_or(vsignf7, vabsf7);
wasm_v128_store(output, vf0);
wasm_v128_store(output + 4, vf1);
wasm_v128_store(output + 8, vf2);
wasm_v128_store(output + 12, vf3);
wasm_v128_store(output + 16, vf4);
wasm_v128_store(output + 20, vf5);
wasm_v128_store(output + 24, vf6);
wasm_v128_store(output + 28, vf7);
output += 32;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vf_lo = wasm_v128_or(vsignf_lo, vabsf_lo);
const v128_t vf_hi = wasm_v128_or(vsignf_hi, vabsf_hi);
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t vf = wasm_v128_or(vsignf_lo, vabsf_lo);
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
vf = wasm_v128_or(vsignf_hi, vabsf_hi);
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 11,281 | 51.719626 | 129 | c |
| XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmsimd-int16-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmsimd_int16_x8(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int16.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int16.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int16.exp_scale);
const v128_t vmagic_mask = wasm_v128_load64_splat(params->wasmsimd_int16.magic_mask);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int16.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int16.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
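  // Smallest variant: one 8-element loop plus a 1-7 element tail, no extra unrolling.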
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vf_lo = wasm_v128_or(vsignf_lo, vabsf_lo);
const v128_t vf_hi = wasm_v128_or(vsignf_hi, vabsf_hi);
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
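  // Tail: convert the remaining 1-7 elements and store them via 4-, 2-, and 1-element partial stores.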
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vsign = wasm_v128_and(vh, vsign_mask);
const v128_t vnonsign = wasm_v128_xor(vh, vsign);
const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0, 8, 1, 9, 2, 10, 3, 11), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0, 8, 1, 9, 2, 10, 3, 11), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);
const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
const v128_t vabsf_lo = wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo);
const v128_t vsignf_lo = wasm_v16x8_shuffle(vzero, vsign, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vabsf_hi = wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi);
const v128_t vsignf_hi = wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15);
v128_t vf = wasm_v128_or(vsignf_lo, vabsf_lo);
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
vf = wasm_v128_or(vsignf_hi, vabsf_hi);
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 5,013 | 41.134454 | 129 | c |
| XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmsimd-int32-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int32.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmsimd_int32_x16(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int32.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int32.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int32.exp_scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int32.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int32.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
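  // Main loop: 16 halves per iteration. Each half is placed in the upper 16 bits of a 32-bit lane
  // (low bits zeroed), then converted with 32-bit arithmetic: shift/offset/scale for normal values,
  // the magic-bias trick for denormals, with the result selected by the denormal cutoff and the
  // sign OR-ed back in.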
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const v128_t vh0 = wasm_v128_load(i);
const v128_t vh1 = wasm_v128_load(i + 8);
i += 16;
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw0 = wasm_v16x8_shuffle(vzero, vh0, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw1 = wasm_v16x8_shuffle(vzero, vh0, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vw2 = wasm_v16x8_shuffle(vzero, vh1, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw3 = wasm_v16x8_shuffle(vzero, vh1, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign0 = wasm_v128_and(vw0, vsign_mask);
const v128_t vsign1 = wasm_v128_and(vw1, vsign_mask);
const v128_t vsign2 = wasm_v128_and(vw2, vsign_mask);
const v128_t vsign3 = wasm_v128_and(vw3, vsign_mask);
const v128_t vnonsign0 = wasm_v128_xor(vw0, vsign0);
const v128_t vnonsign1 = wasm_v128_xor(vw1, vsign1);
const v128_t vnonsign2 = wasm_v128_xor(vw2, vsign2);
const v128_t vnonsign3 = wasm_v128_xor(vw3, vsign3);
const v128_t vnorm0 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign0, 3), vexp_offset), vexp_scale);
const v128_t vnorm1 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign1, 3), vexp_offset), vexp_scale);
const v128_t vnorm2 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign2, 3), vexp_offset), vexp_scale);
const v128_t vnorm3 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign3, 3), vexp_offset), vexp_scale);
const v128_t vdenorm0 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign0, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm1 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign1, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm2 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign2, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm3 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign3, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask0 = wasm_i32x4_gt(vnonsign0, vdenorm_cutoff);
const v128_t vxmask1 = wasm_i32x4_gt(vnonsign1, vdenorm_cutoff);
const v128_t vxmask2 = wasm_i32x4_gt(vnonsign2, vdenorm_cutoff);
const v128_t vxmask3 = wasm_i32x4_gt(vnonsign3, vdenorm_cutoff);
const v128_t vf0 = wasm_v128_or(vsign0, wasm_v128_bitselect(vnorm0, vdenorm0, vxmask0));
const v128_t vf1 = wasm_v128_or(vsign1, wasm_v128_bitselect(vnorm1, vdenorm1, vxmask1));
const v128_t vf2 = wasm_v128_or(vsign2, wasm_v128_bitselect(vnorm2, vdenorm2, vxmask2));
const v128_t vf3 = wasm_v128_or(vsign3, wasm_v128_bitselect(vnorm3, vdenorm3, vxmask3));
wasm_v128_store(output, vf0);
wasm_v128_store(output + 4, vf1);
wasm_v128_store(output + 8, vf2);
wasm_v128_store(output + 12, vf3);
output += 16;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
const v128_t vf_lo = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
const v128_t vf_hi = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
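  // Tail: convert the remaining 1-7 elements and store them via 4-, 2-, and 1-element partial stores.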
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
v128_t vf = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
vf = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 7,280 | 46.279221 | 118 | c |
| XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmsimd-int32-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int32.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmsimd_int32_x24(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int32.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int32.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int32.exp_scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int32.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int32.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
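  // Main loop: same 32-bit conversion scheme as the x16 variant above, unrolled to 24 halves per iteration.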
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const v128_t vh0 = wasm_v128_load(i);
const v128_t vh1 = wasm_v128_load(i + 8);
const v128_t vh2 = wasm_v128_load(i + 16);
i += 24;
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw0 = wasm_v16x8_shuffle(vzero, vh0, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw1 = wasm_v16x8_shuffle(vzero, vh0, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vw2 = wasm_v16x8_shuffle(vzero, vh1, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw3 = wasm_v16x8_shuffle(vzero, vh1, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vw4 = wasm_v16x8_shuffle(vzero, vh2, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw5 = wasm_v16x8_shuffle(vzero, vh2, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign0 = wasm_v128_and(vw0, vsign_mask);
const v128_t vsign1 = wasm_v128_and(vw1, vsign_mask);
const v128_t vsign2 = wasm_v128_and(vw2, vsign_mask);
const v128_t vsign3 = wasm_v128_and(vw3, vsign_mask);
const v128_t vsign4 = wasm_v128_and(vw4, vsign_mask);
const v128_t vsign5 = wasm_v128_and(vw5, vsign_mask);
const v128_t vnonsign0 = wasm_v128_xor(vw0, vsign0);
const v128_t vnonsign1 = wasm_v128_xor(vw1, vsign1);
const v128_t vnonsign2 = wasm_v128_xor(vw2, vsign2);
const v128_t vnonsign3 = wasm_v128_xor(vw3, vsign3);
const v128_t vnonsign4 = wasm_v128_xor(vw4, vsign4);
const v128_t vnonsign5 = wasm_v128_xor(vw5, vsign5);
const v128_t vnorm0 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign0, 3), vexp_offset), vexp_scale);
const v128_t vnorm1 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign1, 3), vexp_offset), vexp_scale);
const v128_t vnorm2 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign2, 3), vexp_offset), vexp_scale);
const v128_t vnorm3 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign3, 3), vexp_offset), vexp_scale);
const v128_t vnorm4 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign4, 3), vexp_offset), vexp_scale);
const v128_t vnorm5 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign5, 3), vexp_offset), vexp_scale);
const v128_t vdenorm0 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign0, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm1 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign1, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm2 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign2, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm3 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign3, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm4 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign4, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm5 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign5, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask0 = wasm_i32x4_gt(vnonsign0, vdenorm_cutoff);
const v128_t vxmask1 = wasm_i32x4_gt(vnonsign1, vdenorm_cutoff);
const v128_t vxmask2 = wasm_i32x4_gt(vnonsign2, vdenorm_cutoff);
const v128_t vxmask3 = wasm_i32x4_gt(vnonsign3, vdenorm_cutoff);
const v128_t vxmask4 = wasm_i32x4_gt(vnonsign4, vdenorm_cutoff);
const v128_t vxmask5 = wasm_i32x4_gt(vnonsign5, vdenorm_cutoff);
const v128_t vf0 = wasm_v128_or(vsign0, wasm_v128_bitselect(vnorm0, vdenorm0, vxmask0));
const v128_t vf1 = wasm_v128_or(vsign1, wasm_v128_bitselect(vnorm1, vdenorm1, vxmask1));
const v128_t vf2 = wasm_v128_or(vsign2, wasm_v128_bitselect(vnorm2, vdenorm2, vxmask2));
const v128_t vf3 = wasm_v128_or(vsign3, wasm_v128_bitselect(vnorm3, vdenorm3, vxmask3));
const v128_t vf4 = wasm_v128_or(vsign4, wasm_v128_bitselect(vnorm4, vdenorm4, vxmask4));
const v128_t vf5 = wasm_v128_or(vsign5, wasm_v128_bitselect(vnorm5, vdenorm5, vxmask5));
wasm_v128_store(output, vf0);
wasm_v128_store(output + 4, vf1);
wasm_v128_store(output + 8, vf2);
wasm_v128_store(output + 12, vf3);
wasm_v128_store(output + 16, vf4);
wasm_v128_store(output + 20, vf5);
output += 24;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
const v128_t vf_lo = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
const v128_t vf_hi = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
v128_t vf = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
vf = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 8,581 | 49.187135 | 118 | c |
| XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmsimd-int32-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int32.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmsimd_int32_x32(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int32.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int32.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int32.exp_scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int32.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int32.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
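  // Main loop: same 32-bit conversion scheme, unrolled to 32 halves per iteration.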
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const v128_t vh0 = wasm_v128_load(i);
const v128_t vh1 = wasm_v128_load(i + 8);
const v128_t vh2 = wasm_v128_load(i + 16);
const v128_t vh3 = wasm_v128_load(i + 24);
i += 32;
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw0 = wasm_v16x8_shuffle(vzero, vh0, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw1 = wasm_v16x8_shuffle(vzero, vh0, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vw2 = wasm_v16x8_shuffle(vzero, vh1, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw3 = wasm_v16x8_shuffle(vzero, vh1, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vw4 = wasm_v16x8_shuffle(vzero, vh2, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw5 = wasm_v16x8_shuffle(vzero, vh2, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vw6 = wasm_v16x8_shuffle(vzero, vh3, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw7 = wasm_v16x8_shuffle(vzero, vh3, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign0 = wasm_v128_and(vw0, vsign_mask);
const v128_t vsign1 = wasm_v128_and(vw1, vsign_mask);
const v128_t vsign2 = wasm_v128_and(vw2, vsign_mask);
const v128_t vsign3 = wasm_v128_and(vw3, vsign_mask);
const v128_t vsign4 = wasm_v128_and(vw4, vsign_mask);
const v128_t vsign5 = wasm_v128_and(vw5, vsign_mask);
const v128_t vsign6 = wasm_v128_and(vw6, vsign_mask);
const v128_t vsign7 = wasm_v128_and(vw7, vsign_mask);
const v128_t vnonsign0 = wasm_v128_xor(vw0, vsign0);
const v128_t vnonsign1 = wasm_v128_xor(vw1, vsign1);
const v128_t vnonsign2 = wasm_v128_xor(vw2, vsign2);
const v128_t vnonsign3 = wasm_v128_xor(vw3, vsign3);
const v128_t vnonsign4 = wasm_v128_xor(vw4, vsign4);
const v128_t vnonsign5 = wasm_v128_xor(vw5, vsign5);
const v128_t vnonsign6 = wasm_v128_xor(vw6, vsign6);
const v128_t vnonsign7 = wasm_v128_xor(vw7, vsign7);
const v128_t vnorm0 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign0, 3), vexp_offset), vexp_scale);
const v128_t vnorm1 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign1, 3), vexp_offset), vexp_scale);
const v128_t vnorm2 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign2, 3), vexp_offset), vexp_scale);
const v128_t vnorm3 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign3, 3), vexp_offset), vexp_scale);
const v128_t vnorm4 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign4, 3), vexp_offset), vexp_scale);
const v128_t vnorm5 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign5, 3), vexp_offset), vexp_scale);
const v128_t vnorm6 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign6, 3), vexp_offset), vexp_scale);
const v128_t vnorm7 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign7, 3), vexp_offset), vexp_scale);
const v128_t vdenorm0 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign0, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm1 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign1, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm2 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign2, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm3 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign3, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm4 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign4, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm5 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign5, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm6 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign6, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm7 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign7, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask0 = wasm_i32x4_gt(vnonsign0, vdenorm_cutoff);
const v128_t vxmask1 = wasm_i32x4_gt(vnonsign1, vdenorm_cutoff);
const v128_t vxmask2 = wasm_i32x4_gt(vnonsign2, vdenorm_cutoff);
const v128_t vxmask3 = wasm_i32x4_gt(vnonsign3, vdenorm_cutoff);
const v128_t vxmask4 = wasm_i32x4_gt(vnonsign4, vdenorm_cutoff);
const v128_t vxmask5 = wasm_i32x4_gt(vnonsign5, vdenorm_cutoff);
const v128_t vxmask6 = wasm_i32x4_gt(vnonsign6, vdenorm_cutoff);
const v128_t vxmask7 = wasm_i32x4_gt(vnonsign7, vdenorm_cutoff);
const v128_t vf0 = wasm_v128_or(vsign0, wasm_v128_bitselect(vnorm0, vdenorm0, vxmask0));
const v128_t vf1 = wasm_v128_or(vsign1, wasm_v128_bitselect(vnorm1, vdenorm1, vxmask1));
const v128_t vf2 = wasm_v128_or(vsign2, wasm_v128_bitselect(vnorm2, vdenorm2, vxmask2));
const v128_t vf3 = wasm_v128_or(vsign3, wasm_v128_bitselect(vnorm3, vdenorm3, vxmask3));
const v128_t vf4 = wasm_v128_or(vsign4, wasm_v128_bitselect(vnorm4, vdenorm4, vxmask4));
const v128_t vf5 = wasm_v128_or(vsign5, wasm_v128_bitselect(vnorm5, vdenorm5, vxmask5));
const v128_t vf6 = wasm_v128_or(vsign6, wasm_v128_bitselect(vnorm6, vdenorm6, vxmask6));
const v128_t vf7 = wasm_v128_or(vsign7, wasm_v128_bitselect(vnorm7, vdenorm7, vxmask7));
wasm_v128_store(output, vf0);
wasm_v128_store(output + 4, vf1);
wasm_v128_store(output + 8, vf2);
wasm_v128_store(output + 12, vf3);
wasm_v128_store(output + 16, vf4);
wasm_v128_store(output + 20, vf5);
wasm_v128_store(output + 24, vf6);
wasm_v128_store(output + 28, vf7);
output += 32;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
const v128_t vf_lo = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
const v128_t vf_hi = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
v128_t vf = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
vf = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 9,882 | 51.569149 | 118 | c |
| XNNPACK | XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-wasmsimd-int32-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int32.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmsimd_int32_x8(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int32.sign_mask);
const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int32.exp_offset);
const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int32.exp_scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int32.magic_bias);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int32.denorm_cutoff);
const uint16_t* i = (const uint16_t*) input;
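  // Main loop: same 32-bit conversion scheme, processing 8 halves per iteration.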
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
const v128_t vf_lo = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
const v128_t vf_hi = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const v128_t vh = wasm_v128_load(i);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
v128_t vf = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
if (batch & (4 * sizeof(uint16_t))) {
wasm_v128_store(output, vf);
output += 4;
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
vf = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
}
if (batch & (2 * sizeof(uint16_t))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 4,520 | 41.252336 | 118 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-gemm/gen/f16-f32acc-gemm-1x16-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_gemm_minmax_ukernel_1x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
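  // Accumulators start from the packed bias at the head of w (stored as f16, widened to f32). The
  // k-loop broadcasts one f16 element of A per step and FMAs it against 16 f16 weights, accumulating
  // in f32; results are clamped to [min, max] and rounded back to f16 on store.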
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
size_t k = kc;
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
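      // Partial output tile: store the remaining nc < 16 columns with 8-, 4-, 2-, and 1-element stores.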
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 8) {
_mm_storeu_si128((__m128i*) c0, vh0x01234567);
vh0x01234567 = _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
c0 += 8;
}
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
c0 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
c0 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,166 | 28.055046 | 108 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-gemm/gen/f16-f32acc-gemm-1x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_gemm_minmax_ukernel_1x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
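  // Same structure as the 1x16 kernel above, but with a single 8-wide f32 accumulator per row.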
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
c0 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
c0 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 2,468 | 24.71875 | 98 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-gemm/gen/f16-f32acc-gemm-3x16-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_gemm_minmax_ukernel_3x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
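  // The pointer setup above aliases rows beyond mr to the previous row, so up to 3 rows of A/C are
  // processed safely; each row carries two 8-wide f32 accumulators initialized from the shared packed bias.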
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
w = (const uint16_t*) w + 16;
size_t k = kc;
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c1 + 8), _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c2 + 8), _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
nc -= 16;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 8) {
_mm_storeu_si128((__m128i*) c0, vh0x01234567);
_mm_storeu_si128((__m128i*) c1, vh1x01234567);
_mm_storeu_si128((__m128i*) c2, vh2x01234567);
vh0x01234567 = _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh1x01234567 = _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh2x01234567 = _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
c0 += 8;
c1 += 8;
c2 += 8;
}
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
c0 += 4;
c1 += 4;
c2 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c2, vh2x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
c0 += 2;
c1 += 2;
c2 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
*c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,403 | 36.450292 | 108 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-gemm/gen/f16-f32acc-gemm-4x16-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_gemm_minmax_ukernel_4x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
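  // 4-row variant: same broadcast/FMA structure with two 8-wide f32 accumulators per row.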
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
w = (const uint16_t*) w + 16;
size_t k = kc;
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c1 + 8), _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c2 + 8), _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c3 + 8), _mm256_cvtps_ph(vacc3x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
nc -= 16;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 8) {
_mm_storeu_si128((__m128i*) c0, vh0x01234567);
_mm_storeu_si128((__m128i*) c1, vh1x01234567);
_mm_storeu_si128((__m128i*) c2, vh2x01234567);
_mm_storeu_si128((__m128i*) c3, vh3x01234567);
vh0x01234567 = _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh1x01234567 = _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh2x01234567 = _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh3x01234567 = _mm256_cvtps_ph(vacc3x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
c0 += 8;
c1 += 8;
c2 += 8;
c3 += 8;
}
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
c0 += 4;
c1 += 4;
c2 += 4;
c3 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c3, vh3x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
c0 += 2;
c1 += 2;
c2 += 2;
c3 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
*c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
*c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,022 | 38.717822 | 108 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-gemm/gen/f16-f32acc-gemm-4x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_gemm_minmax_ukernel_4x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
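  // 4-row, 8-column variant: one 8-wide f32 accumulator per row.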
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
w = (const uint16_t*) w + 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
nc -= 8;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
c0 += 4;
c1 += 4;
c2 += 4;
c3 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c3, vh3x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
c0 += 2;
c1 += 2;
c2 += 2;
c3 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
*c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
*c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,845 | 34.430303 | 98 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-gemm/gen/f16-f32acc-gemm-5x16-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_gemm_minmax_ukernel_5x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
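  // Note (editorial): rows past `mr` alias the previous row's input and output
  // pointers, so the kernel always computes the full 5-row tile; the redundant
  // rows read the same data and store the same results as the last valid row.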
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
w = (const uint16_t*) w + 16;
size_t k = kc;
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
a4 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEF, vacc4x89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
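    // Note (editorial): inputs are widened to f32 and accumulated with FMA in
    // f32 (the "f32acc" in the kernel name); results are clamped and rounded
    // back to f16 only in the stores below.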
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c1 + 8), _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c2 + 8), _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c3 + 8), _mm256_cvtps_ph(vacc3x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c4 + 8), _mm256_cvtps_ph(vacc4x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
nc -= 16;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 8) {
_mm_storeu_si128((__m128i*) c0, vh0x01234567);
_mm_storeu_si128((__m128i*) c1, vh1x01234567);
_mm_storeu_si128((__m128i*) c2, vh2x01234567);
_mm_storeu_si128((__m128i*) c3, vh3x01234567);
_mm_storeu_si128((__m128i*) c4, vh4x01234567);
vh0x01234567 = _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh1x01234567 = _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh2x01234567 = _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh3x01234567 = _mm256_cvtps_ph(vacc3x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh4x01234567 = _mm256_cvtps_ph(vacc4x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
c0 += 8;
c1 += 8;
c2 += 8;
c3 += 8;
c4 += 8;
}
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
_mm_storel_epi64((__m128i*) c4, vh4x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
c0 += 4;
c1 += 4;
c2 += 4;
c3 += 4;
c4 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c3, vh3x01234567);
_mm_storeu_si32(c4, vh4x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
c0 += 2;
c1 += 2;
c2 += 2;
c3 += 2;
c4 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
*c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
*c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
*c4 = (uint16_t) _mm_extract_epi16(vh4x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
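// ---------------------------------------------------------------------------
// Editorial sketch (not part of XNNPACK): one way to lay out the packed
// weight stream `w` consumed by the 16-wide kernels above, inferred from how
// they advance `w`: per 16-column panel, 16 f16 biases followed by kc groups
// of 16 f16 weights, with the last panel zero-padded to the full tile. The
// function name is hypothetical; in practice XNNPACK's own packing routines
// produce this buffer, and it must be 16-byte aligned because the kernels
// load it with _mm_load_si128.
// ---------------------------------------------------------------------------
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void demo_pack_f16_weights_nr16(
    size_t nc, size_t kc,
    const uint16_t* bias,      // nc f16 biases
    const uint16_t* weights,   // kc x nc f16 weights, row-major (k outer, n inner)
    uint16_t* packed)          // ceil(nc/16) * (16 + 16*kc) f16 values, 16-byte aligned
{
  for (size_t n = 0; n < nc; n += 16) {
    const size_t nr = (nc - n) < 16 ? (nc - n) : 16;
    memcpy(packed, bias + n, nr * sizeof(uint16_t));
    memset(packed + nr, 0, (16 - nr) * sizeof(uint16_t));   // f16 +0.0 padding
    packed += 16;
    for (size_t k = 0; k < kc; k++) {
      memcpy(packed, weights + k * nc + n, nr * sizeof(uint16_t));
      memset(packed + nr, 0, (16 - nr) * sizeof(uint16_t));
      packed += 16;
    }
  }
}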
| 9,640 | 40.377682 | 108 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-gemm/gen/f16-f32acc-gemm-5x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_gemm_minmax_ukernel_5x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
w = (const uint16_t*) w + 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
a4 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
nc -= 8;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
_mm_storel_epi64((__m128i*) c4, vh4x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
c0 += 4;
c1 += 4;
c2 += 4;
c3 += 4;
c4 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c3, vh3x01234567);
_mm_storeu_si32(c4, vh4x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
c0 += 2;
c1 += 2;
c2 += 2;
c3 += 2;
c4 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
*c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
*c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
*c4 = (uint16_t) _mm_extract_epi16(vh4x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
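// ---------------------------------------------------------------------------
// Editorial sketch (not part of XNNPACK): the partial-tile stores above write
// the low `n` (n < 8) f16 lanes of an __m128i with a 4/2/1 cascade, shifting
// the surviving lanes down between steps. The same pattern, factored into a
// hypothetical helper; note that _mm_storeu_si32 is a recent intrinsic which
// XNNPACK polyfills via <xnnpack/intrinsics-polyfill.h>.
// ---------------------------------------------------------------------------
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>

static void demo_store_f16_tail(uint16_t* c, __m128i vh, size_t n /* 0..7 */) {
  if (n & 4) {
    _mm_storel_epi64((__m128i*) c, vh);    // lanes 0..3
    vh = _mm_unpackhi_epi64(vh, vh);       // move lanes 4..7 into the low half
    c += 4;
  }
  if (n & 2) {
    _mm_storeu_si32(c, vh);                // two more lanes
    vh = _mm_srli_epi64(vh, 32);
    c += 2;
  }
  if (n & 1) {
    *c = (uint16_t) _mm_extract_epi16(vh, 0);
  }
}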
| 6,970 | 36.079787 | 98 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-gemm/gen/f16-f32acc-gemm-6x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_gemm_minmax_ukernel_6x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const uint16_t* a5 = (const uint16_t*) ((uintptr_t) a4 + a_stride);
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc5x01234567 = vacc0x01234567;
w = (const uint16_t*) w + 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
a4 += 1;
const __m256 va5 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a5));
a5 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_si128((__m128i*) c5, _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_TO_NEAREST_INT));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
a5 = (const uint16_t*) ((uintptr_t) a5 - kc);
nc -= 8;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5x01234567 = _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
_mm_storel_epi64((__m128i*) c4, vh4x01234567);
_mm_storel_epi64((__m128i*) c5, vh5x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
vh5x01234567 = _mm_unpackhi_epi64(vh5x01234567, vh5x01234567);
c0 += 4;
c1 += 4;
c2 += 4;
c3 += 4;
c4 += 4;
c5 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c3, vh3x01234567);
_mm_storeu_si32(c4, vh4x01234567);
_mm_storeu_si32(c5, vh5x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
vh5x01234567 = _mm_srli_epi64(vh5x01234567, 32);
c0 += 2;
c1 += 2;
c2 += 2;
c3 += 2;
c4 += 2;
c5 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
*c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
*c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
*c4 = (uint16_t) _mm_extract_epi16(vh4x01234567, 0);
*c5 = (uint16_t) _mm_extract_epi16(vh5x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
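// ---------------------------------------------------------------------------
// Editorial sketch (not part of XNNPACK): how the stride arguments of the
// GEMM kernels above relate to a dense, row-major f16 output, as implied by
// the pointer arithmetic in the code: `a_stride`/`cm_stride` are the byte
// distances between consecutive input/output rows, `cn_stride` is the byte
// distance the output pointers advance after one 8-column block, and `kc`
// is the reduction depth in bytes. Values below are illustrative only.
// ---------------------------------------------------------------------------
#include <stddef.h>
#include <stdint.h>

static inline void demo_f16_gemm_strides(size_t k, size_t n,
                                         size_t* a_stride, size_t* cm_stride,
                                         size_t* cn_stride, size_t* kc) {
  *kc = k * sizeof(uint16_t);        // reduction depth, in bytes
  *a_stride = k * sizeof(uint16_t);  // dense input rows of k halfwords
  *cm_stride = n * sizeof(uint16_t); // dense output rows of n halfwords
  *cn_stride = 8 * sizeof(uint16_t); // advance to the next 8-column block
}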
| 8,096 | 37.374408 | 98 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-gemm/gen/f16-f32acc-gemm-7x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_gemm_minmax_ukernel_7x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 7);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const uint16_t* a5 = (const uint16_t*) ((uintptr_t) a4 + a_stride);
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
a5 = a4;
c5 = c4;
}
const uint16_t* a6 = (const uint16_t*) ((uintptr_t) a5 + a_stride);
uint16_t* c6 = (uint16_t*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
a6 = a5;
c6 = c5;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc5x01234567 = vacc0x01234567;
__m256 vacc6x01234567 = vacc0x01234567;
w = (const uint16_t*) w + 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
a4 += 1;
const __m256 va5 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a5));
a5 += 1;
const __m256 va6 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a6));
a6 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);
vacc6x01234567 = _mm256_max_ps(vacc6x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);
vacc6x01234567 = _mm256_min_ps(vacc6x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_si128((__m128i*) c5, _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_TO_NEAREST_INT));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
_mm_storeu_si128((__m128i*) c6, _mm256_cvtps_ph(vacc6x01234567, _MM_FROUND_TO_NEAREST_INT));
c6 = (uint16_t*) ((uintptr_t) c6 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
a5 = (const uint16_t*) ((uintptr_t) a5 - kc);
a6 = (const uint16_t*) ((uintptr_t) a6 - kc);
nc -= 8;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5x01234567 = _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh6x01234567 = _mm256_cvtps_ph(vacc6x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
_mm_storel_epi64((__m128i*) c4, vh4x01234567);
_mm_storel_epi64((__m128i*) c5, vh5x01234567);
_mm_storel_epi64((__m128i*) c6, vh6x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
vh5x01234567 = _mm_unpackhi_epi64(vh5x01234567, vh5x01234567);
vh6x01234567 = _mm_unpackhi_epi64(vh6x01234567, vh6x01234567);
c0 += 4;
c1 += 4;
c2 += 4;
c3 += 4;
c4 += 4;
c5 += 4;
c6 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c3, vh3x01234567);
_mm_storeu_si32(c4, vh4x01234567);
_mm_storeu_si32(c5, vh5x01234567);
_mm_storeu_si32(c6, vh6x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
vh5x01234567 = _mm_srli_epi64(vh5x01234567, 32);
vh6x01234567 = _mm_srli_epi64(vh6x01234567, 32);
c0 += 2;
c1 += 2;
c2 += 2;
c3 += 2;
c4 += 2;
c5 += 2;
c6 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
*c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
*c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
*c4 = (uint16_t) _mm_extract_epi16(vh4x01234567, 0);
*c5 = (uint16_t) _mm_extract_epi16(vh5x01234567, 0);
*c6 = (uint16_t) _mm_extract_epi16(vh6x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
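// ---------------------------------------------------------------------------
// Editorial sketch (not part of XNNPACK): the kernels above read their
// clamping bounds with _mm256_load_ps(params->avx.min) and ->avx.max, i.e.
// eight 32-byte-aligned copies of each bound. Below is a hypothetical
// stand-in with the same access pattern, for standalone experiments only --
// real callers must initialize xnn_f16_minmax_params through XNNPACK's init
// helpers rather than a hand-rolled struct.
// ---------------------------------------------------------------------------
#include <stdalign.h>

struct demo_f16_minmax_avx_params {
  alignas(32) float min[8];
  alignas(32) float max[8];
};

static void demo_init_f16_minmax(struct demo_f16_minmax_avx_params* p,
                                 float min, float max) {
  for (int i = 0; i < 8; i++) {
    p->min[i] = min;   // replicated so one 256-bit load yields the bound in every lane
    p->max[i] = max;
  }
}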
| 9,221 | 38.410256 | 98 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-igemm/gen/f16-f32acc-igemm-1x16-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_igemm_minmax_ukernel_1x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = c;
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
size_t p = ks;
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
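      // Note (editorial): an entry equal to the shared `zero` buffer marks a
      // padding tap and is used as-is; only real input pointers are rebased by
      // `a_offset`.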
a += 1;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
p -= 1 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 8) {
_mm_storeu_si128((__m128i*) c0, vh0x01234567);
vh0x01234567 = _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
c0 += 8;
}
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
c0 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
c0 += 2;
}
if (nc & 1) {
*c0 = _mm_extract_epi16(vh0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 3,588 | 28.178862 | 110 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-igemm/gen/f16-f32acc-igemm-1x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_igemm_minmax_ukernel_1x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = c;
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
size_t p = ks;
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
k -= sizeof(uint16_t);
} while (k != 0);
p -= 1 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
c0 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
c0 += 2;
}
if (nc & 1) {
*c0 = _mm_extract_epi16(vh0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
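// ---------------------------------------------------------------------------
// Editorial sketch (not part of XNNPACK): how the indirection inputs of the
// IGEMM kernels above fit together. Each indirection step consumes `mr` row
// pointers from `a` (the kernel's `ks` argument is given in bytes, i.e.
// steps * mr * sizeof(void*)); entries equal to the shared `zero` buffer are
// used as-is (padding taps that read zeros), every other entry is rebased by
// `a_offset` at run time, and `a` is rewound by `ks` bytes once a column
// block is finished. The builder below is a deliberately simplified
// illustration with hypothetical names; real indirection buffers are set up
// by XNNPACK's operator code.
// ---------------------------------------------------------------------------
#include <stddef.h>
#include <stdint.h>

static void demo_build_indirection(
    const void** a,             // steps * mr entries, in kernel consumption order
    const uint16_t* input,      // base of the f16 input rows
    size_t row_stride,          // distance between rows, in elements
    const uint16_t* zero,       // zero buffer that is also passed to the kernel
    size_t mr, size_t steps,
    const ptrdiff_t* tap_row)   // one row index per step; negative means padding
{
  for (size_t s = 0; s < steps; s++) {
    for (size_t m = 0; m < mr; m++) {
      const ptrdiff_t row = tap_row[s] + (ptrdiff_t) m;
      a[s * mr + m] = (row < 0)
          ? (const void*) zero
          : (const void*) (input + (size_t) row * row_stride);
    }
  }
}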
| 2,886 | 25.245455 | 98 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-igemm/gen/f16-f32acc-igemm-3x16-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_igemm_minmax_ukernel_3x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = c;
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
w = (const uint16_t*) w + 16;
size_t p = ks;
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
const uint16_t* restrict a1 = (const uint16_t*) a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint16_t*) ((uintptr_t) a1 + a_offset);
}
const uint16_t* restrict a2 = (const uint16_t*) a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint16_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c2 + 8), _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c1 + 8), _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 8) {
_mm_storeu_si128((__m128i*) c2, vh2x01234567);
_mm_storeu_si128((__m128i*) c1, vh1x01234567);
_mm_storeu_si128((__m128i*) c0, vh0x01234567);
vh2x01234567 = _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh1x01234567 = _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh0x01234567 = _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
c2 += 8;
c1 += 8;
c0 += 8;
}
if (nc & 4) {
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c0, vh0x01234567);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
*c2 = _mm_extract_epi16(vh2x01234567, 0);
*c1 = _mm_extract_epi16(vh1x01234567, 0);
*c0 = _mm_extract_epi16(vh0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,939 | 35.719577 | 110 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-igemm/gen/f16-f32acc-igemm-4x16-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_igemm_minmax_ukernel_4x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = c;
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
w = (const uint16_t*) w + 16;
size_t p = ks;
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
const uint16_t* restrict a1 = (const uint16_t*) a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint16_t*) ((uintptr_t) a1 + a_offset);
}
const uint16_t* restrict a2 = (const uint16_t*) a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint16_t*) ((uintptr_t) a2 + a_offset);
}
const uint16_t* restrict a3 = (const uint16_t*) a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint16_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c3 + 8), _mm256_cvtps_ph(vacc3x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c2 + 8), _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c1 + 8), _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 8) {
_mm_storeu_si128((__m128i*) c3, vh3x01234567);
_mm_storeu_si128((__m128i*) c2, vh2x01234567);
_mm_storeu_si128((__m128i*) c1, vh1x01234567);
_mm_storeu_si128((__m128i*) c0, vh0x01234567);
vh3x01234567 = _mm256_cvtps_ph(vacc3x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh2x01234567 = _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh1x01234567 = _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh0x01234567 = _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
if (nc & 4) {
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c3, vh3x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c0, vh0x01234567);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
*c3 = _mm_extract_epi16(vh3x01234567, 0);
*c2 = _mm_extract_epi16(vh2x01234567, 0);
*c1 = _mm_extract_epi16(vh1x01234567, 0);
*c0 = _mm_extract_epi16(vh0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,615 | 37.810811 | 110 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-igemm/gen/f16-f32acc-igemm-4x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_igemm_minmax_ukernel_4x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = c;
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
w = (const uint16_t*) w + 8;
size_t p = ks;
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
const uint16_t* restrict a1 = (const uint16_t*) a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint16_t*) ((uintptr_t) a1 + a_offset);
}
const uint16_t* restrict a2 = (const uint16_t*) a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint16_t*) ((uintptr_t) a2 + a_offset);
}
const uint16_t* restrict a3 = (const uint16_t*) a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint16_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
k -= sizeof(uint16_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c3, vh3x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c0, vh0x01234567);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
*c3 = _mm_extract_epi16(vh3x01234567, 0);
*c2 = _mm_extract_epi16(vh2x01234567, 0);
*c1 = _mm_extract_epi16(vh1x01234567, 0);
*c0 = _mm_extract_epi16(vh0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,428 | 33.751351 | 98 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-igemm/gen/f16-f32acc-igemm-5x16-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_igemm_minmax_ukernel_5x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = c;
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
w = (const uint16_t*) w + 16;
size_t p = ks;
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
const uint16_t* restrict a1 = (const uint16_t*) a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint16_t*) ((uintptr_t) a1 + a_offset);
}
const uint16_t* restrict a2 = (const uint16_t*) a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint16_t*) ((uintptr_t) a2 + a_offset);
}
const uint16_t* restrict a3 = (const uint16_t*) a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint16_t*) ((uintptr_t) a3 + a_offset);
}
const uint16_t* restrict a4 = (const uint16_t*) a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const uint16_t*) ((uintptr_t) a4 + a_offset);
}
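      // a[] is the indirection buffer, 5 row pointers per group; a pointer equal to `zero` reads the shared zero buffer and skips the a_offset adjustment.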
a += 5;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
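        // Broadcast one f16 element from each of the 5 rows of A, widen to f32, and accumulate against the 16 packed weights with FMA.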
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
a4 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEF, vacc4x89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
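    // Clamp the f32 accumulators to the requested [min, max] range before rounding back to f16.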
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
_mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c4 + 8), _mm256_cvtps_ph(vacc4x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c3 + 8), _mm256_cvtps_ph(vacc3x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c2 + 8), _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c1 + 8), _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
__m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
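      // Fewer than 16 columns remain: store them in chunks of 8, 4, 2, and 1 according to the bits of nc.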
if (nc & 8) {
_mm_storeu_si128((__m128i*) c4, vh4x01234567);
_mm_storeu_si128((__m128i*) c3, vh3x01234567);
_mm_storeu_si128((__m128i*) c2, vh2x01234567);
_mm_storeu_si128((__m128i*) c1, vh1x01234567);
_mm_storeu_si128((__m128i*) c0, vh0x01234567);
vh4x01234567 = _mm256_cvtps_ph(vacc4x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh3x01234567 = _mm256_cvtps_ph(vacc3x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh2x01234567 = _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh1x01234567 = _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh0x01234567 = _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
c4 += 8;
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
if (nc & 4) {
_mm_storel_epi64((__m128i*) c4, vh4x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c4, vh4x01234567);
_mm_storeu_si32(c3, vh3x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c0, vh0x01234567);
vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
*c4 = _mm_extract_epi16(vh4x01234567, 0);
*c3 = _mm_extract_epi16(vh3x01234567, 0);
*c2 = _mm_extract_epi16(vh2x01234567, 0);
*c1 = _mm_extract_epi16(vh1x01234567, 0);
*c0 = _mm_extract_epi16(vh0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,290 | 39.356863 | 110 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-igemm/gen/f16-f32acc-igemm-5x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_igemm_minmax_ukernel_5x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = c;
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
w = (const uint16_t*) w + 8;
size_t p = ks;
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
const uint16_t* restrict a1 = (const uint16_t*) a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint16_t*) ((uintptr_t) a1 + a_offset);
}
const uint16_t* restrict a2 = (const uint16_t*) a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint16_t*) ((uintptr_t) a2 + a_offset);
}
const uint16_t* restrict a3 = (const uint16_t*) a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint16_t*) ((uintptr_t) a3 + a_offset);
}
const uint16_t* restrict a4 = (const uint16_t*) a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const uint16_t*) ((uintptr_t) a4 + a_offset);
}
a += 5;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
a4 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
k -= sizeof(uint16_t);
} while (k != 0);
p -= 5 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
__m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c4, vh4x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c4, vh4x01234567);
_mm_storeu_si32(c3, vh3x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c0, vh0x01234567);
vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
*c4 = _mm_extract_epi16(vh4x01234567, 0);
*c3 = _mm_extract_epi16(vh3x01234567, 0);
*c2 = _mm_extract_epi16(vh2x01234567, 0);
*c1 = _mm_extract_epi16(vh1x01234567, 0);
*c0 = _mm_extract_epi16(vh0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,608 | 35.233333 | 98 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-igemm/gen/f16-f32acc-igemm-6x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_igemm_minmax_ukernel_6x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = c;
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc5x01234567 = vacc0x01234567;
w = (const uint16_t*) w + 8;
size_t p = ks;
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
const uint16_t* restrict a1 = (const uint16_t*) a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint16_t*) ((uintptr_t) a1 + a_offset);
}
const uint16_t* restrict a2 = (const uint16_t*) a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint16_t*) ((uintptr_t) a2 + a_offset);
}
const uint16_t* restrict a3 = (const uint16_t*) a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint16_t*) ((uintptr_t) a3 + a_offset);
}
const uint16_t* restrict a4 = (const uint16_t*) a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const uint16_t*) ((uintptr_t) a4 + a_offset);
}
const uint16_t* restrict a5 = (const uint16_t*) a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const uint16_t*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
a4 += 1;
const __m256 va5 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a5));
a5 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
k -= sizeof(uint16_t);
} while (k != 0);
p -= 6 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c5, _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_TO_NEAREST_INT));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
_mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
__m128i vh5x01234567 = _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c5, vh5x01234567);
_mm_storel_epi64((__m128i*) c4, vh4x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
vh5x01234567 = _mm_unpackhi_epi64(vh5x01234567, vh5x01234567);
vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c5, vh5x01234567);
_mm_storeu_si32(c4, vh4x01234567);
_mm_storeu_si32(c3, vh3x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c0, vh0x01234567);
vh5x01234567 = _mm_srli_epi64(vh5x01234567, 32);
vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
*c5 = _mm_extract_epi16(vh5x01234567, 0);
*c4 = _mm_extract_epi16(vh4x01234567, 0);
*c3 = _mm_extract_epi16(vh3x01234567, 0);
*c2 = _mm_extract_epi16(vh2x01234567, 0);
*c1 = _mm_extract_epi16(vh1x01234567, 0);
*c0 = _mm_extract_epi16(vh0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,789 | 36.404255 | 98 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-igemm/gen/f16-f32acc-igemm-7x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_f32acc_igemm_minmax_ukernel_7x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 7);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (7 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = c;
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
c5 = c4;
}
uint16_t* c6 = (uint16_t*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
c6 = c5;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc5x01234567 = vacc0x01234567;
__m256 vacc6x01234567 = vacc0x01234567;
w = (const uint16_t*) w + 8;
size_t p = ks;
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
const uint16_t* restrict a1 = (const uint16_t*) a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint16_t*) ((uintptr_t) a1 + a_offset);
}
const uint16_t* restrict a2 = (const uint16_t*) a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint16_t*) ((uintptr_t) a2 + a_offset);
}
const uint16_t* restrict a3 = (const uint16_t*) a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint16_t*) ((uintptr_t) a3 + a_offset);
}
const uint16_t* restrict a4 = (const uint16_t*) a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const uint16_t*) ((uintptr_t) a4 + a_offset);
}
const uint16_t* restrict a5 = (const uint16_t*) a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const uint16_t*) ((uintptr_t) a5 + a_offset);
}
const uint16_t* restrict a6 = (const uint16_t*) a[6];
assert(a6 != NULL);
if XNN_UNPREDICTABLE(a6 != zero) {
a6 = (const uint16_t*) ((uintptr_t) a6 + a_offset);
}
a += 7;
size_t k = kc;
do {
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
a4 += 1;
const __m256 va5 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a5));
a5 += 1;
const __m256 va6 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a6));
a6 += 1;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);
k -= sizeof(uint16_t);
} while (k != 0);
p -= 7 * sizeof(void*);
} while (p != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);
vacc6x01234567 = _mm256_max_ps(vacc6x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);
vacc6x01234567 = _mm256_min_ps(vacc6x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c6, _mm256_cvtps_ph(vacc6x01234567, _MM_FROUND_TO_NEAREST_INT));
c6 = (uint16_t*) ((uintptr_t) c6 + cn_stride);
_mm_storeu_si128((__m128i*) c5, _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_TO_NEAREST_INT));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
_mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
__m128i vh6x01234567 = _mm256_cvtps_ph(vacc6x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5x01234567 = _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c6, vh6x01234567);
_mm_storel_epi64((__m128i*) c5, vh5x01234567);
_mm_storel_epi64((__m128i*) c4, vh4x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
vh6x01234567 = _mm_unpackhi_epi64(vh6x01234567, vh6x01234567);
vh5x01234567 = _mm_unpackhi_epi64(vh5x01234567, vh5x01234567);
vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
c6 += 4;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c6, vh6x01234567);
_mm_storeu_si32(c5, vh5x01234567);
_mm_storeu_si32(c4, vh4x01234567);
_mm_storeu_si32(c3, vh3x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c0, vh0x01234567);
vh6x01234567 = _mm_srli_epi64(vh6x01234567, 32);
vh5x01234567 = _mm_srli_epi64(vh5x01234567, 32);
vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
c6 += 2;
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
*c6 = _mm_extract_epi16(vh6x01234567, 0);
*c5 = _mm_extract_epi16(vh5x01234567, 0);
*c4 = _mm_extract_epi16(vh4x01234567, 0);
*c3 = _mm_extract_epi16(vh3x01234567, 0);
*c2 = _mm_extract_epi16(vh2x01234567, 0);
*c1 = _mm_extract_epi16(vh1x01234567, 0);
*c0 = _mm_extract_epi16(vh0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,969 | 37.346154 | 98 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-rsum/gen/f16-f32acc-rsum-f16c-x16-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32acc-rsum/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
#include <xnnpack/unaligned.h>
void xnn_f16_f32acc_rsum_ukernel__f16c_x16_acc2(
size_t batch,
const void* input,
void* output,
const union xnn_f16_f32acc_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
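  // Two independent f32 accumulators keep the additions in the unrolled loop off the same dependency chain; they are merged below.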
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const __m256 vt0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vt1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
i += 16;
vacc0 = _mm256_add_ps(vacc0, vt0);
vacc1 = _mm256_add_ps(vacc1, vt1);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vt = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc0 = _mm256_add_ps(vacc0, vt);
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
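    // The masked load covers whole 32-bit lanes (pairs of f16 values); an odd trailing element is added separately below.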
const __m128i vmask = _mm_loadu_si128((const __m128i*) ((uintptr_t) ¶ms->avx.mask_table[7] - batch));
const __m128i vh = _mm_castps_si128(_mm_maskload_ps((const float*) i, vmask));
const __m256 vt = _mm256_cvtph_ps(vh);
vacc0 = _mm256_add_ps(vacc0, vt);
i = (const void*) ((uintptr_t) i + batch);
if (batch & (1 * sizeof(uint16_t))) {
const __m128i vh = _mm_insert_epi16(_mm_setzero_si128(), (int) unaligned_load_u16(i - 1), 0);
const __m256 vt = _mm256_zextps128_ps256(_mm_cvtph_ps(vh));
vacc0 = _mm256_add_ps(vacc0, vt);
}
}
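  // Horizontal reduction: fold 256 bits to 128, then to a scalar, apply the scale, and round the sum back to f16.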
__m128 vacc = _mm_add_ps(_mm256_castps256_ps128(vacc0), _mm256_extractf128_ps(vacc0, 1));
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_movehdup_ps(vacc));
vacc = _mm_mul_ss(vacc, _mm_load_ss(¶ms->avx.scale));
const __m128i vout = _mm_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout, 0));
}
| 2,511 | 35.405797 | 109 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-rsum/gen/f16-f32acc-rsum-f16c-x24-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32acc-rsum/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
#include <xnnpack/unaligned.h>
void xnn_f16_f32acc_rsum_ukernel__f16c_x24_acc3(
size_t batch,
const void* input,
void* output,
const union xnn_f16_f32acc_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const __m256 vt0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vt1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vt2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
i += 24;
vacc0 = _mm256_add_ps(vacc0, vt0);
vacc1 = _mm256_add_ps(vacc1, vt1);
vacc2 = _mm256_add_ps(vacc2, vt2);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc0 = _mm256_add_ps(vacc0, vacc2);
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vt = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc0 = _mm256_add_ps(vacc0, vt);
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m128i vmask = _mm_loadu_si128((const __m128i*) ((uintptr_t) ¶ms->avx.mask_table[7] - batch));
const __m128i vh = _mm_castps_si128(_mm_maskload_ps((const float*) i, vmask));
const __m256 vt = _mm256_cvtph_ps(vh);
vacc0 = _mm256_add_ps(vacc0, vt);
i = (const void*) ((uintptr_t) i + batch);
if (batch & (1 * sizeof(uint16_t))) {
const __m128i vh = _mm_insert_epi16(_mm_setzero_si128(), (int) unaligned_load_u16(i - 1), 0);
const __m256 vt = _mm256_zextps128_ps256(_mm_cvtph_ps(vh));
vacc0 = _mm256_add_ps(vacc0, vt);
}
}
__m128 vacc = _mm_add_ps(_mm256_castps256_ps128(vacc0), _mm256_extractf128_ps(vacc0, 1));
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_movehdup_ps(vacc));
vacc = _mm_mul_ss(vacc, _mm_load_ss(¶ms->avx.scale));
const __m128i vout = _mm_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout, 0));
}
| 2,711 | 36.150685 | 109 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-rsum/gen/f16-f32acc-rsum-f16c-x32-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32acc-rsum/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
#include <xnnpack/unaligned.h>
void xnn_f16_f32acc_rsum_ukernel__f16c_x32_acc2(
size_t batch,
const void* input,
void* output,
const union xnn_f16_f32acc_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
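  // 32 halves per iteration with two accumulators: each accumulator absorbs two of the four converted vectors.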
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const __m256 vt0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vt1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vt2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vt3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
i += 32;
vacc0 = _mm256_add_ps(vacc0, vt0);
vacc1 = _mm256_add_ps(vacc1, vt1);
vacc0 = _mm256_add_ps(vacc0, vt2);
vacc1 = _mm256_add_ps(vacc1, vt3);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vt = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc0 = _mm256_add_ps(vacc0, vt);
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m128i vmask = _mm_loadu_si128((const __m128i*) ((uintptr_t) ¶ms->avx.mask_table[7] - batch));
const __m128i vh = _mm_castps_si128(_mm_maskload_ps((const float*) i, vmask));
const __m256 vt = _mm256_cvtph_ps(vh);
vacc0 = _mm256_add_ps(vacc0, vt);
i = (const void*) ((uintptr_t) i + batch);
if (batch & (1 * sizeof(uint16_t))) {
const __m128i vh = _mm_insert_epi16(_mm_setzero_si128(), (int) unaligned_load_u16(i - 1), 0);
const __m256 vt = _mm256_zextps128_ps256(_mm_cvtph_ps(vh));
vacc0 = _mm256_add_ps(vacc0, vt);
}
}
__m128 vacc = _mm_add_ps(_mm256_castps256_ps128(vacc0), _mm256_extractf128_ps(vacc0, 1));
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_movehdup_ps(vacc));
vacc = _mm_mul_ss(vacc, _mm_load_ss(¶ms->avx.scale));
const __m128i vout = _mm_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout, 0));
}
| 2,757 | 36.780822 | 109 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-rsum/gen/f16-f32acc-rsum-f16c-x32-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32acc-rsum/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
#include <xnnpack/unaligned.h>
void xnn_f16_f32acc_rsum_ukernel__f16c_x32_acc4(
size_t batch,
const void* input,
void* output,
const union xnn_f16_f32acc_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
__m256 vacc0 = _mm256_setzero_ps();
__m256 vacc1 = _mm256_setzero_ps();
__m256 vacc2 = _mm256_setzero_ps();
__m256 vacc3 = _mm256_setzero_ps();
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const __m256 vt0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
const __m256 vt1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
const __m256 vt2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16)));
const __m256 vt3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24)));
i += 32;
vacc0 = _mm256_add_ps(vacc0, vt0);
vacc1 = _mm256_add_ps(vacc1, vt1);
vacc2 = _mm256_add_ps(vacc2, vt2);
vacc3 = _mm256_add_ps(vacc3, vt3);
}
vacc0 = _mm256_add_ps(vacc0, vacc1);
vacc2 = _mm256_add_ps(vacc2, vacc3);
vacc0 = _mm256_add_ps(vacc0, vacc2);
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vt = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc0 = _mm256_add_ps(vacc0, vt);
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m128i vmask = _mm_loadu_si128((const __m128i*) ((uintptr_t) ¶ms->avx.mask_table[7] - batch));
const __m128i vh = _mm_castps_si128(_mm_maskload_ps((const float*) i, vmask));
const __m256 vt = _mm256_cvtph_ps(vh);
vacc0 = _mm256_add_ps(vacc0, vt);
i = (const void*) ((uintptr_t) i + batch);
if (batch & (1 * sizeof(uint16_t))) {
const __m128i vh = _mm_insert_epi16(_mm_setzero_si128(), (int) unaligned_load_u16(i - 1), 0);
const __m256 vt = _mm256_zextps128_ps256(_mm_cvtph_ps(vh));
vacc0 = _mm256_add_ps(vacc0, vt);
}
}
__m128 vacc = _mm_add_ps(_mm256_castps256_ps128(vacc0), _mm256_extractf128_ps(vacc0, 1));
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_movehdup_ps(vacc));
vacc = _mm_mul_ss(vacc, _mm_load_ss(¶ms->avx.scale));
const __m128i vout = _mm_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout, 0));
}
| 2,911 | 36.818182 | 109 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-rsum/gen/f16-f32acc-rsum-f16c-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32acc-rsum/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
#include <xnnpack/unaligned.h>
void xnn_f16_f32acc_rsum_ukernel__f16c_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_f32acc_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
__m256 vacc0 = _mm256_setzero_ps();
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const __m256 vt = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
i += 8;
vacc0 = _mm256_add_ps(vacc0, vt);
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const __m128i vmask = _mm_loadu_si128((const __m128i*) ((uintptr_t) ¶ms->avx.mask_table[7] - batch));
const __m128i vh = _mm_castps_si128(_mm_maskload_ps((const float*) i, vmask));
const __m256 vt = _mm256_cvtph_ps(vh);
vacc0 = _mm256_add_ps(vacc0, vt);
i = (const void*) ((uintptr_t) i + batch);
if (batch & (1 * sizeof(uint16_t))) {
const __m128i vh = _mm_insert_epi16(_mm_setzero_si128(), (int) unaligned_load_u16(i - 1), 0);
const __m256 vt = _mm256_zextps128_ps256(_mm_cvtph_ps(vh));
vacc0 = _mm256_add_ps(vacc0, vt);
}
}
__m128 vacc = _mm_add_ps(_mm256_castps256_ps128(vacc0), _mm256_extractf128_ps(vacc0, 1));
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_movehdup_ps(vacc));
vacc = _mm_mul_ss(vacc, _mm_load_ss(¶ms->avx.scale));
const __m128i vout = _mm_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout, 0));
}
| 2,097 | 34.559322 | 109 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-rsum/gen/f16-f32acc-rsum-neonfp16-x16-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32acc-rsum/neonfp16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f16_f32acc_rsum_ukernel__neonfp16_x16_acc2(
size_t batch,
const void* input,
void* output,
const union xnn_f16_f32acc_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t vh01 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh23 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float32x4_t vt0 = vcvt_f32_f16(vget_low_f16(vh01));
const float32x4_t vt1 = vcvt_f32_f16(vget_high_f16(vh01));
const float32x4_t vt2 = vcvt_f32_f16(vget_low_f16(vh23));
const float32x4_t vt3 = vcvt_f32_f16(vget_high_f16(vh23));
vacc0 = vaddq_f32(vacc0, vt0);
vacc1 = vaddq_f32(vacc1, vt1);
vacc0 = vaddq_f32(vacc0, vt2);
vacc1 = vaddq_f32(vacc1, vt3);
}
vacc0 = vaddq_f32(vacc0, vacc1);
for (; batch >= 4 * sizeof(uint16_t); batch -= 4 * sizeof(uint16_t)) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_u16(i)); i += 4;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc0 = vaddq_f32(vacc0, vt);
}
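  // Fold the 4-lane accumulator to 2 lanes, add the 2- and 1-element tails, then scale and store a single f16 result.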
const float32x2_t vscale = vld1_dup_f32(¶ms->scalar.scale);
float32x2_t vacc = vadd_f32(vget_low_f32(vacc0), vget_high_f32(vacc0));
if XNN_UNLIKELY(batch & (2 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u32(vld1_dup_u32((const void*) i)); i += 2;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
vacc = vpadd_f32(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_dup_u16(i));
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
vacc = vmul_f32(vacc, vscale);
const float16x4_t vout = vcvt_f16_f32(vcombine_f32(vacc, vacc));
vst1_lane_u16(o, vreinterpret_u16_f16(vout), 0);
}
| 2,511 | 34.885714 | 87 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-rsum/gen/f16-f32acc-rsum-neonfp16-x24-acc3.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32acc-rsum/neonfp16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f16_f32acc_rsum_ukernel__neonfp16_x24_acc3(
size_t batch,
const void* input,
void* output,
const union xnn_f16_f32acc_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
float32x4_t vacc2 = vmovq_n_f32(0.0f);
for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
const float16x8_t vh01 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh23 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh45 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float32x4_t vt0 = vcvt_f32_f16(vget_low_f16(vh01));
const float32x4_t vt1 = vcvt_f32_f16(vget_high_f16(vh01));
const float32x4_t vt2 = vcvt_f32_f16(vget_low_f16(vh23));
const float32x4_t vt3 = vcvt_f32_f16(vget_high_f16(vh23));
const float32x4_t vt4 = vcvt_f32_f16(vget_low_f16(vh45));
const float32x4_t vt5 = vcvt_f32_f16(vget_high_f16(vh45));
vacc0 = vaddq_f32(vacc0, vt0);
vacc1 = vaddq_f32(vacc1, vt1);
vacc2 = vaddq_f32(vacc2, vt2);
vacc0 = vaddq_f32(vacc0, vt3);
vacc1 = vaddq_f32(vacc1, vt4);
vacc2 = vaddq_f32(vacc2, vt5);
}
vacc0 = vaddq_f32(vacc0, vacc1);
vacc0 = vaddq_f32(vacc0, vacc2);
for (; batch >= 4 * sizeof(uint16_t); batch -= 4 * sizeof(uint16_t)) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_u16(i)); i += 4;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc0 = vaddq_f32(vacc0, vt);
}
const float32x2_t vscale = vld1_dup_f32(¶ms->scalar.scale);
float32x2_t vacc = vadd_f32(vget_low_f32(vacc0), vget_high_f32(vacc0));
if XNN_UNLIKELY(batch & (2 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u32(vld1_dup_u32((const void*) i)); i += 2;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
vacc = vpadd_f32(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_dup_u16(i));
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
vacc = vmul_f32(vacc, vscale);
const float16x4_t vout = vcvt_f16_f32(vcombine_f32(vacc, vacc));
vst1_lane_u16(o, vreinterpret_u16_f16(vout), 0);
}
| 2,856 | 36.103896 | 87 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-rsum/gen/f16-f32acc-rsum-neonfp16-x32-acc2.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32acc-rsum/neonfp16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f16_f32acc_rsum_ukernel__neonfp16_x32_acc2(
size_t batch,
const void* input,
void* output,
const union xnn_f16_f32acc_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const float16x8_t vh01 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh23 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh45 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh67 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float32x4_t vt0 = vcvt_f32_f16(vget_low_f16(vh01));
const float32x4_t vt1 = vcvt_f32_f16(vget_high_f16(vh01));
const float32x4_t vt2 = vcvt_f32_f16(vget_low_f16(vh23));
const float32x4_t vt3 = vcvt_f32_f16(vget_high_f16(vh23));
const float32x4_t vt4 = vcvt_f32_f16(vget_low_f16(vh45));
const float32x4_t vt5 = vcvt_f32_f16(vget_high_f16(vh45));
const float32x4_t vt6 = vcvt_f32_f16(vget_low_f16(vh67));
const float32x4_t vt7 = vcvt_f32_f16(vget_high_f16(vh67));
vacc0 = vaddq_f32(vacc0, vt0);
vacc1 = vaddq_f32(vacc1, vt1);
vacc0 = vaddq_f32(vacc0, vt2);
vacc1 = vaddq_f32(vacc1, vt3);
vacc0 = vaddq_f32(vacc0, vt4);
vacc1 = vaddq_f32(vacc1, vt5);
vacc0 = vaddq_f32(vacc0, vt6);
vacc1 = vaddq_f32(vacc1, vt7);
}
vacc0 = vaddq_f32(vacc0, vacc1);
for (; batch >= 4 * sizeof(uint16_t); batch -= 4 * sizeof(uint16_t)) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_u16(i)); i += 4;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc0 = vaddq_f32(vacc0, vt);
}
const float32x2_t vscale = vld1_dup_f32(¶ms->scalar.scale);
float32x2_t vacc = vadd_f32(vget_low_f32(vacc0), vget_high_f32(vacc0));
if XNN_UNLIKELY(batch & (2 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u32(vld1_dup_u32((const void*) i)); i += 2;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
vacc = vpadd_f32(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_dup_u16(i));
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
vacc = vmul_f32(vacc, vscale);
const float16x4_t vout = vcvt_f16_f32(vcombine_f32(vacc, vacc));
vst1_lane_u16(o, vreinterpret_u16_f16(vout), 0);
}
| 3,049 | 37.125 | 87 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-rsum/gen/f16-f32acc-rsum-neonfp16-x32-acc4.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32acc-rsum/neonfp16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f16_f32acc_rsum_ukernel__neonfp16_x32_acc4(
size_t batch,
const void* input,
void* output,
const union xnn_f16_f32acc_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
float32x4_t vacc2 = vmovq_n_f32(0.0f);
float32x4_t vacc3 = vmovq_n_f32(0.0f);
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const float16x8_t vh01 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh23 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh45 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh67 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float32x4_t vt0 = vcvt_f32_f16(vget_low_f16(vh01));
const float32x4_t vt1 = vcvt_f32_f16(vget_high_f16(vh01));
const float32x4_t vt2 = vcvt_f32_f16(vget_low_f16(vh23));
const float32x4_t vt3 = vcvt_f32_f16(vget_high_f16(vh23));
const float32x4_t vt4 = vcvt_f32_f16(vget_low_f16(vh45));
const float32x4_t vt5 = vcvt_f32_f16(vget_high_f16(vh45));
const float32x4_t vt6 = vcvt_f32_f16(vget_low_f16(vh67));
const float32x4_t vt7 = vcvt_f32_f16(vget_high_f16(vh67));
vacc0 = vaddq_f32(vacc0, vt0);
vacc1 = vaddq_f32(vacc1, vt1);
vacc2 = vaddq_f32(vacc2, vt2);
vacc3 = vaddq_f32(vacc3, vt3);
vacc0 = vaddq_f32(vacc0, vt4);
vacc1 = vaddq_f32(vacc1, vt5);
vacc2 = vaddq_f32(vacc2, vt6);
vacc3 = vaddq_f32(vacc3, vt7);
}
vacc0 = vaddq_f32(vacc0, vacc1);
vacc2 = vaddq_f32(vacc2, vacc3);
vacc0 = vaddq_f32(vacc0, vacc2);
for (; batch >= 4 * sizeof(uint16_t); batch -= 4 * sizeof(uint16_t)) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_u16(i)); i += 4;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc0 = vaddq_f32(vacc0, vt);
}
const float32x2_t vscale = vld1_dup_f32(¶ms->scalar.scale);
float32x2_t vacc = vadd_f32(vget_low_f32(vacc0), vget_high_f32(vacc0));
if XNN_UNLIKELY(batch & (2 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u32(vld1_dup_u32((const void*) i)); i += 2;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
vacc = vpadd_f32(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_dup_u16(i));
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
vacc = vmul_f32(vacc, vscale);
const float16x4_t vout = vcvt_f16_f32(vcombine_f32(vacc, vacc));
vst1_lane_u16(o, vreinterpret_u16_f16(vout), 0);
}
| 3,201 | 37.119048 | 87 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-rsum/gen/f16-f32acc-rsum-neonfp16-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32acc-rsum/neonfp16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f16_f32acc_rsum_ukernel__neonfp16_x4(
size_t batch,
const void* input,
void* output,
const union xnn_f16_f32acc_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float32x4_t vacc0 = vmovq_n_f32(0.0f);
for (; batch >= 4 * sizeof(uint16_t); batch -= 4 * sizeof(uint16_t)) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_u16(i)); i += 4;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc0 = vaddq_f32(vacc0, vt);
}
const float32x2_t vscale = vld1_dup_f32(¶ms->scalar.scale);
float32x2_t vacc = vadd_f32(vget_low_f32(vacc0), vget_high_f32(vacc0));
if XNN_UNLIKELY(batch & (2 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u32(vld1_dup_u32((const void*) i)); i += 2;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
vacc = vpadd_f32(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_dup_u16(i));
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
vacc = vmul_f32(vacc, vscale);
const float16x4_t vout = vcvt_f16_f32(vcombine_f32(vacc, vacc));
vst1_lane_u16(o, vreinterpret_u16_f16(vout), 0);
}
| 1,810 | 32.537037 | 87 | c |
| XNNPACK | XNNPACK-master/src/f16-f32acc-rsum/gen/f16-f32acc-rsum-neonfp16-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-f32acc-rsum/neonfp16.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
void xnn_f16_f32acc_rsum_ukernel__neonfp16_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_f32acc_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float32x4_t vacc0 = vmovq_n_f32(0.0f);
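  // Accumulate 4 fp16 values per iteration after widening them to fp32.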
for (; batch >= 4 * sizeof(uint16_t); batch -= 4 * sizeof(uint16_t)) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_u16(i)); i += 4;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc0 = vaddq_f32(vacc0, vt);
}
  const float32x2_t vscale = vld1_dup_f32(&params->scalar.scale);
float32x2_t vacc = vadd_f32(vget_low_f32(vacc0), vget_high_f32(vacc0));
if XNN_UNLIKELY(batch & (2 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u32(vld1_dup_u32((const void*) i)); i += 2;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
vacc = vpadd_f32(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_dup_u16(i));
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
vacc = vmul_f32(vacc, vscale);
const float16x4_t vout = vcvt_f16_f32(vcombine_f32(vacc, vacc));
vst1_lane_u16(o, vreinterpret_u16_f16(vout), 0);
}
| 1,810 | 32.537037 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-gavgpool-cw/f16-gavgpool-cw-neonfp16arith-x8.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_f16_gavgpool_cw_ukernel__neonfp16arith_x8(
size_t elements,
size_t channels,
const void* input,
void* output,
const union xnn_f16_gavgpool_params* params) XNN_OOB_READS
{
assert(elements != 0);
assert(elements % sizeof(uint16_t) == 0);
assert(channels != 0);
const uint16x8_t vmask = vld1q_u16(params->neonfp16arith.mask);
  const float16x4_t vmultiplier = vreinterpret_f16_u16(vld1_dup_u16(&params->neonfp16arith.multiplier));
  const float16x4_t voutput_min = vreinterpret_f16_u16(vld1_dup_u16(&params->neonfp16arith.output_min));
  const float16x4_t voutput_max = vreinterpret_f16_u16(vld1_dup_u16(&params->neonfp16arith.output_max));
uint16_t* o = (uint16_t*) output;
const uint16_t* i = input;
do {
float16x8_t vsum0 = vreinterpretq_f16_u16(vmovq_n_u16(0));
float16x8_t vsum1 = vreinterpretq_f16_u16(vmovq_n_u16(0));
size_t n = elements;
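    // Sum 32 halfwords per iteration, splitting the work across two fp16 accumulators.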
if (n >= 32 * sizeof(uint16_t)) {
do {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i));
const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i + 8));
const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i + 16));
const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i + 24));
i += 32;
const float16x8_t acc0 = vaddq_f16(vi0, vi1);
const float16x8_t acc1 = vaddq_f16(vi2, vi3);
vsum0 = vaddq_f16(vsum0, acc0);
vsum1 = vaddq_f16(vsum1, acc1);
n -= 32 * sizeof(uint16_t);
} while (n >= 32 * sizeof(uint16_t));
}
vsum0 = vaddq_f16(vsum0, vsum1);
while (n >= 8 * sizeof(uint16_t)) {
const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i));
i += 8;
vsum0 = vaddq_f16(vsum0, vi0);
n -= 8 * sizeof(uint16_t);
}
if XNN_UNLIKELY(n != 0) {
float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i)); i = (const uint16_t*) ((uintptr_t) i + n);
vi0 = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi0)));
vsum0 = vaddq_f16(vsum0, vi0);
}
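    // Collapse the 8-lane sum with pairwise adds, then scale by the precomputed multiplier and clamp to the output range.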
const float16x4_t vout4 = vpadd_f16(vget_low_f16(vsum0), vget_high_f16(vsum0));
const float16x4_t vout2 = vpadd_f16(vout4, vout4);
const float16x4_t vout1 = vpadd_f16(vout2, vout2);
float16x4_t vout = vmul_f16(vout1, vmultiplier);
vout = vmax_f16(vout, voutput_min);
vout = vmin_f16(vout, voutput_max);
vst1_lane_u16(o, vreinterpret_u16_f16(vout), 0); o += 1;
} while (--channels != 0);
}
| 2,716 | 33.392405 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7p7x-minmax-f16c-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/multipass-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* buffer,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint16_t);
uint16_t* b = buffer;
size_t c = channels;
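  // First pass: accumulate the first 7 rows into the fp16 scratch buffer, 16 channels per iteration; every addition round-trips through fp32 because F16C only provides conversions, not fp16 arithmetic.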
for (; c >= 16; c -= 16) {
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
__m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
__m128i vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(vi0x89ABCDEF, vi1x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
_mm_store_si128((__m128i*) b, vacc01234567); b += 8;
_mm_store_si128((__m128i*) b, vacc89ABCDEF); b += 8;
}
if XNN_UNLIKELY(c != 0) {
do {
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
__m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
_mm_store_si128((__m128i*) b, vacc01234567); b += 8;
c = doz(c, 8);
} while (c != 0);
}
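  // Middle passes: fold 7 more rows at a time into the scratch buffer until at most 7 rows remain.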
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
uint16_t* b = buffer;
size_t c = channels;
for (; c >= 16; c -= 16) {
__m128i vacc01234567 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc89ABCDEF = _mm_loadu_si128((const __m128i*) (b + 8));
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi0x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi1x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
_mm_store_si128((__m128i*) b, vacc01234567); b += 8;
_mm_store_si128((__m128i*) b, vacc89ABCDEF); b += 8;
}
if XNN_UNLIKELY(c != 0) {
do {
__m128i vacc01234567 = _mm_loadu_si128((const __m128i*) b);
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
_mm_store_si128((__m128i*) b, vacc01234567);
b += 8;
c = doz(c, 8);
} while (c != 0);
}
}
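  // Final pass: 1-7 rows remain; any row pointer beyond the remaining count is redirected to the zero buffer so it contributes nothing.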
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
uint16_t* o = (uint16_t*) output;
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
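  // Apply the scale, clamp to [min, max], and write 16 output channels per iteration, with an 8-channel remainder loop below.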
for (; channels >= 16; channels -= 16) {
__m128i vacc01234567 = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
__m128i vacc89ABCDEF = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi0x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi1x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc89ABCDEF), vscale), _MM_FROUND_TO_NEAREST_INT);
__m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
__m256 vout89ABCDEF = _mm256_max_ps(_mm256_cvtph_ps(vacc89ABCDEF), vmin);
vout01234567 = _mm256_min_ps(vout01234567, vmax);
vout89ABCDEF = _mm256_min_ps(vout89ABCDEF, vmax);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) ((uint16_t*) o + 8), _mm256_cvtps_ph(vout89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
__m128i vacc01234567 = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_TO_NEAREST_INT);
__m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
vout01234567 = _mm256_min_ps(vout01234567, vmax);
if XNN_LIKELY(channels >= 8) {
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT));
o += 8;
channels -= 8;
} else {
__m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT);
if (channels & 4) {
_mm_storel_epi64((__m128i*) o, vh01234567);
o += 4;
vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
}
if (channels & 2) {
_mm_storeu_si32(o, vh01234567);
o += 2;
vh01234567 = _mm_srli_epi64(vh01234567, 32);
}
if (channels & 1) {
*o = (uint16_t) _mm_extract_epi16(vh01234567, 0);
}
channels = 0;
}
} while (channels != 0);
}
}
| 19,539 | 64.57047 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7p7x-minmax-f16c-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/multipass-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* buffer,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint16_t);
uint16_t* b = buffer;
size_t c = channels;
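  // First pass: sum the first 7 rows into the fp16 scratch buffer, 24 channels per iteration.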
for (; c >= 24; c -= 24) {
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi0xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
const __m256 vi1xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
__m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
__m128i vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(vi0x89ABCDEF, vi1x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
__m128i vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(vi0xGHIJKLMN, vi1xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi2xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi3xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi4xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi5xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi6xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
_mm_store_si128((__m128i*) b, vacc01234567); b += 8;
_mm_store_si128((__m128i*) b, vacc89ABCDEF); b += 8;
_mm_store_si128((__m128i*) b, vaccGHIJKLMN); b += 8;
}
if XNN_UNLIKELY(c != 0) {
do {
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
__m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
_mm_store_si128((__m128i*) b, vacc01234567); b += 8;
c = doz(c, 8);
} while (c != 0);
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
uint16_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
__m128i vacc01234567 = _mm_loadu_si128((const __m128i*) b);
__m128i vacc89ABCDEF = _mm_loadu_si128((const __m128i*) (b + 8));
__m128i vaccGHIJKLMN = _mm_loadu_si128((const __m128i*) (b + 16));
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi0xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi0x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi1xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi0xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi1x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi1xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi2xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi3xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi4xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi5xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi6xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
_mm_store_si128((__m128i*) b, vacc01234567); b += 8;
_mm_store_si128((__m128i*) b, vacc89ABCDEF); b += 8;
_mm_store_si128((__m128i*) b, vaccGHIJKLMN); b += 8;
}
if XNN_UNLIKELY(c != 0) {
do {
__m128i vacc01234567 = _mm_loadu_si128((const __m128i*) b);
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
_mm_store_si128((__m128i*) b, vacc01234567);
b += 8;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
uint16_t* o = (uint16_t*) output;
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
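  // Apply the scale, clamp, and store 24 output channels per iteration before handling the remainder.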
for (; channels >= 24; channels -= 24) {
__m128i vacc01234567 = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
__m128i vacc89ABCDEF = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
__m128i vaccGHIJKLMN = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi0xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi0x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi1xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi0xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi1x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi1xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi2xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi3xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi4xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi5xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi6xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc89ABCDEF), vscale), _MM_FROUND_TO_NEAREST_INT);
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vscale), _MM_FROUND_TO_NEAREST_INT);
__m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
__m256 vout89ABCDEF = _mm256_max_ps(_mm256_cvtph_ps(vacc89ABCDEF), vmin);
__m256 voutGHIJKLMN = _mm256_max_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vmin);
vout01234567 = _mm256_min_ps(vout01234567, vmax);
vout89ABCDEF = _mm256_min_ps(vout89ABCDEF, vmax);
voutGHIJKLMN = _mm256_min_ps(voutGHIJKLMN, vmax);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) ((uint16_t*) o + 8), _mm256_cvtps_ph(vout89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) ((uint16_t*) o + 16), _mm256_cvtps_ph(voutGHIJKLMN, _MM_FROUND_TO_NEAREST_INT));
o += 24;
}
if XNN_UNLIKELY(channels != 0) {
do {
__m128i vacc01234567 = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_TO_NEAREST_INT);
__m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
vout01234567 = _mm256_min_ps(vout01234567, vmax);
if XNN_LIKELY(channels >= 8) {
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT));
o += 8;
channels -= 8;
} else {
__m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT);
if (channels & 4) {
_mm_storel_epi64((__m128i*) o, vh01234567);
o += 4;
vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
}
if (channels & 2) {
_mm_storeu_si32(o, vh01234567);
o += 2;
vh01234567 = _mm_srli_epi64(vh01234567, 32);
}
if (channels & 1) {
*o = (uint16_t) _mm_extract_epi16(vh01234567, 0);
}
channels = 0;
}
} while (channels != 0);
}
}
| 24,687 | 70.146974 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7p7x-minmax-f16c-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/multipass-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* buffer,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint16_t);
uint16_t* b = buffer;
size_t c = channels;
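  // First pass: sum the first 7 rows into the fp16 scratch buffer, 8 channels at a time.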
for (; c != 0; c = doz(c, 8)) {
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
__m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
_mm_store_si128((__m128i*) b, vacc01234567); b += 8;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
uint16_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 8)) {
__m128i vacc01234567 = _mm_loadu_si128((const __m128i*) b);
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
_mm_store_si128((__m128i*) b, vacc01234567); b += 8;
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
uint16_t* o = (uint16_t*) output;
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
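  // Apply the scale, clamp, and store 8 output channels per iteration; the remainder is handled below.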
for (; channels >= 8; channels -= 8) {
__m128i vacc01234567 = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_TO_NEAREST_INT);
__m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
vout01234567 = _mm256_min_ps(vout01234567, vmax);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(channels != 0) {
{
__m128i vacc01234567 = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_TO_NEAREST_INT);
__m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
vout01234567 = _mm256_min_ps(vout01234567, vmax);
__m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT);
if (channels & 4) {
_mm_storel_epi64((__m128i*) o, vh01234567);
o += 4;
vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
}
if (channels & 2) {
_mm_storeu_si32(o, vh01234567);
o += 2;
vh01234567 = _mm_srli_epi64(vh01234567, 32);
}
if (channels & 1) {
*o = (uint16_t) _mm_extract_epi16(vh01234567, 0);
}
}
}
}
| 10,730 | 54.030769 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7p7x-minmax-neonfp16arith-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/multipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c16(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* buffer,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint16_t);
uint16_t* b = buffer;
size_t c = channels;
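  // First pass: sum the first 7 rows into the scratch buffer, 16 channels per iteration.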
for (; c >= 16; c -= 16) {
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc89ABCDEF = vaddq_f16(vi0x89ABCDEF, vi1x89ABCDEF);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi2x89ABCDEF);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi3x89ABCDEF);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi4x89ABCDEF);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi5x89ABCDEF);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi6x89ABCDEF);
vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567)); b += 8;
vst1q_u16(b, vreinterpretq_u16_f16(vacc89ABCDEF)); b += 8;
}
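  // First-pass remainder: accumulate the trailing 1-15 channels in full groups of 8.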
if XNN_UNLIKELY(c != 0) {
do {
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567)); b += 8;
c = doz(c, 8);
} while (c != 0);
}
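  // Middle passes: for every further group of 7 rows, reload the partial sums from the
  // scratch buffer, add the 7 new rows, and store the updated sums back.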
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
uint16_t* b = buffer;
size_t c = channels;
for (; c >= 16; c -= 16) {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vacc89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(b + 8));
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi0x89ABCDEF);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi1x89ABCDEF);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi2x89ABCDEF);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi3x89ABCDEF);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi4x89ABCDEF);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi5x89ABCDEF);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi6x89ABCDEF);
vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567)); b += 8;
vst1q_u16(b, vreinterpretq_u16_f16(vacc89ABCDEF)); b += 8;
}
if XNN_UNLIKELY(c != 0) {
do {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567)); b += 8;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
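  // Final pass: 1-7 rows remain (row pointers past the valid rows alias the zero buffer).
  // Add them to the buffered sums, apply the scale from params (1/number of pooled rows
  // for an average), clamp to [min, max], and store the results.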
  const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.scale));
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
for (; channels >= 16; channels -= 16) {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(buffer)); buffer = (uint16_t*) buffer + 8;
float16x8_t vacc89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(buffer)); buffer = (uint16_t*) buffer + 8;
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi0x89ABCDEF);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi1x89ABCDEF);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi2x89ABCDEF);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi3x89ABCDEF);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi4x89ABCDEF);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi5x89ABCDEF);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi6x89ABCDEF);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc89ABCDEF = vmulq_f16(vacc89ABCDEF, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc89ABCDEF = vmaxq_f16(vacc89ABCDEF, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output = (uint16_t*) output + 8;
}
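  // Output remainder: the last 1-15 channels; full 8-wide vectors are computed and the
  // final partial vector is stored in 4/2/1-element pieces.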
if XNN_UNLIKELY(channels != 0) {
do {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(buffer)); buffer = (uint16_t*) buffer + 8;
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
if XNN_LIKELY(channels >= 8) {
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
channels -= 8;
} else {
float16x4_t vacc0123 = vget_low_f16(vacc01234567);
if (channels & 4) {
vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4;
vacc0123 = vget_high_f16(vacc01234567);
}
if (channels & 2) {
vst1_lane_u32(output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2;
vacc0123 = vext_f16(vacc0123, vacc0123, 2);
}
if (channels & 1) {
vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 14,754 | 49.704467 | 107 | c |
| XNNPACK | XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7p7x-minmax-neonfp16arith-c24.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/multipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>


void xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c24(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* buffer,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint16_t);
uint16_t* b = buffer;
size_t c = channels;
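  // First pass: sum the first 7 rows into the scratch buffer, 24 channels per iteration.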
for (; c >= 24; c -= 24) {
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc89ABCDEF = vaddq_f16(vi0x89ABCDEF, vi1x89ABCDEF);
const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vaccGHIJKLMN = vaddq_f16(vi0xGHIJKLMN, vi1xGHIJKLMN);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi2x89ABCDEF);
const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi2xGHIJKLMN);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi3x89ABCDEF);
const float16x8_t vi4xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi3xGHIJKLMN);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi4x89ABCDEF);
const float16x8_t vi5xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi4xGHIJKLMN);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi5x89ABCDEF);
const float16x8_t vi6xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi5xGHIJKLMN);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi6x89ABCDEF);
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi6xGHIJKLMN);
vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567)); b += 8;
vst1q_u16(b, vreinterpretq_u16_f16(vacc89ABCDEF)); b += 8;
vst1q_u16(b, vreinterpretq_u16_f16(vaccGHIJKLMN)); b += 8;
}
if XNN_UNLIKELY(c != 0) {
do {
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567)); b += 8;
c = doz(c, 8);
} while (c != 0);
}
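  // Middle passes: each further group of 7 rows is accumulated on top of the buffered sums.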
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
uint16_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vacc89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(b + 8));
float16x8_t vaccGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(b + 16));
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi0x89ABCDEF);
const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi0xGHIJKLMN);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi1x89ABCDEF);
const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi1xGHIJKLMN);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi2x89ABCDEF);
const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi2xGHIJKLMN);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi3x89ABCDEF);
const float16x8_t vi4xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi3xGHIJKLMN);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi4x89ABCDEF);
const float16x8_t vi5xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi4xGHIJKLMN);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi5x89ABCDEF);
const float16x8_t vi6xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi5xGHIJKLMN);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi6x89ABCDEF);
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi6xGHIJKLMN);
vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567)); b += 8;
vst1q_u16(b, vreinterpretq_u16_f16(vacc89ABCDEF)); b += 8;
vst1q_u16(b, vreinterpretq_u16_f16(vaccGHIJKLMN)); b += 8;
}
if XNN_UNLIKELY(c != 0) {
do {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567)); b += 8;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
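  // Final pass: 1-7 rows remain (unused row pointers alias the zero buffer); add them,
  // scale, clamp to [min, max], and store.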
  const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.scale));
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
for (; channels >= 24; channels -= 24) {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(buffer)); buffer = (uint16_t*) buffer + 8;
float16x8_t vacc89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(buffer)); buffer = (uint16_t*) buffer + 8;
float16x8_t vaccGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(buffer)); buffer = (uint16_t*) buffer + 8;
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi0x89ABCDEF);
const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi0xGHIJKLMN);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi1x89ABCDEF);
const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi1xGHIJKLMN);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi2x89ABCDEF);
const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi2xGHIJKLMN);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi3x89ABCDEF);
const float16x8_t vi4xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi3xGHIJKLMN);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi4x89ABCDEF);
const float16x8_t vi5xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi4xGHIJKLMN);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi5x89ABCDEF);
const float16x8_t vi6xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi5xGHIJKLMN);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi6x89ABCDEF);
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi6xGHIJKLMN);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc89ABCDEF = vmulq_f16(vacc89ABCDEF, vscale);
vaccGHIJKLMN = vmulq_f16(vaccGHIJKLMN, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc89ABCDEF = vmaxq_f16(vacc89ABCDEF, vmin);
vaccGHIJKLMN = vmaxq_f16(vaccGHIJKLMN, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vaccGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax);
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output = (uint16_t*) output + 8;
vst1q_u16(output, vreinterpretq_u16_f16(vaccGHIJKLMN)); output = (uint16_t*) output + 8;
}
if XNN_UNLIKELY(channels != 0) {
do {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(buffer)); buffer = (uint16_t*) buffer + 8;
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
if XNN_LIKELY(channels >= 8) {
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
channels -= 8;
} else {
float16x4_t vacc0123 = vget_low_f16(vacc01234567);
if (channels & 4) {
vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4;
vacc0123 = vget_high_f16(vacc01234567);
}
if (channels & 2) {
vst1_lane_u32(output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2;
vacc0123 = vext_f16(vacc0123, vacc0123, 2);
}
if (channels & 1) {
vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 18,272 | 52.744118 | 107 | c |
| XNNPACK | XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7p7x-minmax-neonfp16arith-c32.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/multipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>


void xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c32(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* buffer,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint16_t);
uint16_t* b = buffer;
size_t c = channels;
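  // First pass: sum the first 7 rows into the scratch buffer, 32 channels per iteration.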
for (; c >= 32; c -= 32) {
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc89ABCDEF = vaddq_f16(vi0x89ABCDEF, vi1x89ABCDEF);
const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vaccGHIJKLMN = vaddq_f16(vi0xGHIJKLMN, vi1xGHIJKLMN);
const float16x8_t vi2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vaccOPQRSTUV = vaddq_f16(vi0xOPQRSTUV, vi1xOPQRSTUV);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi2x89ABCDEF);
const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi2xGHIJKLMN);
const float16x8_t vi3xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi2xOPQRSTUV);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi3x89ABCDEF);
const float16x8_t vi4xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi3xGHIJKLMN);
const float16x8_t vi4xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi3xOPQRSTUV);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi4x89ABCDEF);
const float16x8_t vi5xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi4xGHIJKLMN);
const float16x8_t vi5xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi4xOPQRSTUV);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi5x89ABCDEF);
const float16x8_t vi6xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi5xGHIJKLMN);
const float16x8_t vi6xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi5xOPQRSTUV);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi6x89ABCDEF);
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi6xGHIJKLMN);
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi6xOPQRSTUV);
vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567)); b += 8;
vst1q_u16(b, vreinterpretq_u16_f16(vacc89ABCDEF)); b += 8;
vst1q_u16(b, vreinterpretq_u16_f16(vaccGHIJKLMN)); b += 8;
vst1q_u16(b, vreinterpretq_u16_f16(vaccOPQRSTUV)); b += 8;
}
if XNN_UNLIKELY(c != 0) {
do {
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567)); b += 8;
c = doz(c, 8);
} while (c != 0);
}
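  // Middle passes: each further group of 7 rows is accumulated on top of the buffered sums.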
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
uint16_t* b = buffer;
size_t c = channels;
for (; c >= 32; c -= 32) {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vacc89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(b + 8));
float16x8_t vaccGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(b + 16));
float16x8_t vaccOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(b + 24));
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi0x89ABCDEF);
const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi0xGHIJKLMN);
const float16x8_t vi1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi0xOPQRSTUV);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi1x89ABCDEF);
const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi1xGHIJKLMN);
const float16x8_t vi2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi1xOPQRSTUV);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi2x89ABCDEF);
const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi2xGHIJKLMN);
const float16x8_t vi3xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi2xOPQRSTUV);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi3x89ABCDEF);
const float16x8_t vi4xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi3xGHIJKLMN);
const float16x8_t vi4xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi3xOPQRSTUV);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi4x89ABCDEF);
const float16x8_t vi5xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi4xGHIJKLMN);
const float16x8_t vi5xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi4xOPQRSTUV);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi5x89ABCDEF);
const float16x8_t vi6xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi5xGHIJKLMN);
const float16x8_t vi6xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi5xOPQRSTUV);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi6x89ABCDEF);
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi6xGHIJKLMN);
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi6xOPQRSTUV);
vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567)); b += 8;
vst1q_u16(b, vreinterpretq_u16_f16(vacc89ABCDEF)); b += 8;
vst1q_u16(b, vreinterpretq_u16_f16(vaccGHIJKLMN)); b += 8;
vst1q_u16(b, vreinterpretq_u16_f16(vaccOPQRSTUV)); b += 8;
}
if XNN_UNLIKELY(c != 0) {
do {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567)); b += 8;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
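  // Final pass: 1-7 rows remain (unused row pointers alias the zero buffer); add them,
  // scale, clamp to [min, max], and store.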
  const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.scale));
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
for (; channels >= 32; channels -= 32) {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(buffer)); buffer = (uint16_t*) buffer + 8;
float16x8_t vacc89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(buffer)); buffer = (uint16_t*) buffer + 8;
float16x8_t vaccGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(buffer)); buffer = (uint16_t*) buffer + 8;
float16x8_t vaccOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(buffer)); buffer = (uint16_t*) buffer + 8;
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi0x89ABCDEF);
const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi0xGHIJKLMN);
const float16x8_t vi1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi0xOPQRSTUV);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi1x89ABCDEF);
const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi1xGHIJKLMN);
const float16x8_t vi2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi1xOPQRSTUV);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi2x89ABCDEF);
const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi2xGHIJKLMN);
const float16x8_t vi3xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi2xOPQRSTUV);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi3x89ABCDEF);
const float16x8_t vi4xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi3xGHIJKLMN);
const float16x8_t vi4xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi3xOPQRSTUV);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi4x89ABCDEF);
const float16x8_t vi5xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi4xGHIJKLMN);
const float16x8_t vi5xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi4xOPQRSTUV);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi5x89ABCDEF);
const float16x8_t vi6xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi5xGHIJKLMN);
const float16x8_t vi6xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi5xOPQRSTUV);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi6x89ABCDEF);
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi6xGHIJKLMN);
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi6xOPQRSTUV);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc89ABCDEF = vmulq_f16(vacc89ABCDEF, vscale);
vaccGHIJKLMN = vmulq_f16(vaccGHIJKLMN, vscale);
vaccOPQRSTUV = vmulq_f16(vaccOPQRSTUV, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc89ABCDEF = vmaxq_f16(vacc89ABCDEF, vmin);
vaccGHIJKLMN = vmaxq_f16(vaccGHIJKLMN, vmin);
vaccOPQRSTUV = vmaxq_f16(vaccOPQRSTUV, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vaccGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax);
vaccOPQRSTUV = vminq_f16(vaccOPQRSTUV, vmax);
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output = (uint16_t*) output + 8;
vst1q_u16(output, vreinterpretq_u16_f16(vaccGHIJKLMN)); output = (uint16_t*) output + 8;
vst1q_u16(output, vreinterpretq_u16_f16(vaccOPQRSTUV)); output = (uint16_t*) output + 8;
}
if XNN_UNLIKELY(channels != 0) {
do {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(buffer)); buffer = (uint16_t*) buffer + 8;
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
if XNN_LIKELY(channels >= 8) {
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
channels -= 8;
} else {
float16x4_t vacc0123 = vget_low_f16(vacc01234567);
if (channels & 4) {
vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4;
vacc0123 = vget_high_f16(vacc01234567);
}
if (channels & 2) {
vst1_lane_u32(output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2;
vacc0123 = vext_f16(vacc0123, vacc0123, 2);
}
if (channels & 1) {
vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 21,790 | 55.017995 | 107 | c |
| XNNPACK | XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7p7x-minmax-neonfp16arith-c8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/multipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>


void xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c8(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* buffer,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint16_t);
uint16_t* b = buffer;
size_t c = channels;
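  // First pass: sum the first 7 rows into the scratch buffer, 8 channels per iteration
  // (the channel tile equals the vector width, so no separate remainder loop is needed).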
for (; c != 0; c = doz(c, 8)) {
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567)); b += 8;
}
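  // Middle passes: each further group of 7 rows is accumulated on top of the buffered sums.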
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
uint16_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 8)) {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567)); b += 8;
}
}
i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
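  // Final pass: 1-7 rows remain (unused row pointers alias the zero buffer); add them,
  // scale, clamp to [min, max], and store.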
  const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.scale));
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
for (; channels >= 8; channels -= 8) {
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(buffer)); buffer = (uint16_t*) buffer + 8;
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
}
if XNN_UNLIKELY(channels != 0) {
{
float16x8_t vacc01234567 = vreinterpretq_f16_u16(vld1q_u16(buffer)); buffer = (uint16_t*) buffer + 8;
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi0x01234567);
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
float16x4_t vacc0123 = vget_low_f16(vacc01234567);
if (channels & 4) {
vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4;
vacc0123 = vget_high_f16(vacc01234567);
}
if (channels & 2) {
vst1_lane_u32(output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2;
vacc0123 = vext_f16(vacc0123, vacc0123, 2);
}
if (channels & 1) {
vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1;
}
}
}
}
| 8,580 | 44.163158 | 107 | c |
| XNNPACK | XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7x-minmax-f16c-c16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/unipass-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
uint16_t* o = (uint16_t*) output;
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
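  // Main loop: 16 channels per iteration. Rows are summed in fp32 and rounded back to fp16
  // after every addition (cvtps_ph/cvtph_ps), so the accumulator behaves like a half-precision sum.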
for (; channels >= 16; channels -= 16) {
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 8)));
i0 += 16;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 8)));
i1 += 16;
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
__m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i2 + 8)));
__m128i vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(vi0x89ABCDEF, vi1x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
i2 += 16;
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i3 + 8)));
i3 += 16;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i4 + 8)));
i4 += 16;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i5 + 8)));
i5 += 16;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i6 + 8)));
i6 += 16;
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc89ABCDEF), vscale), _MM_FROUND_TO_NEAREST_INT);
__m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
__m256 vout89ABCDEF = _mm256_max_ps(_mm256_cvtph_ps(vacc89ABCDEF), vmin);
vout01234567 = _mm256_min_ps(vout01234567, vmax);
vout89ABCDEF = _mm256_min_ps(vout89ABCDEF, vmax);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vout89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
o += 16;
}
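  // Remainder: handle the leftover channels 8 at a time. The kernel is annotated XNN_OOB_READS,
  // so it may load a full 8-element vector past the end; the final partial vector is stored
  // as 4/2/1 halfwords.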
if XNN_UNLIKELY(channels != 0) {
do {
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
__m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
i2 += 8;
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_TO_NEAREST_INT);
__m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
vout01234567 = _mm256_min_ps(vout01234567, vmax);
if XNN_LIKELY(channels >= 8) {
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT));
o += 8;
channels -= 8;
} else {
__m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT);
if (channels & 4) {
_mm_storel_epi64((__m128i*) o, vh01234567);
o += 4;
vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
}
if (channels & 2) {
_mm_storeu_si32(o, vh01234567);
o += 2;
vh01234567 = _mm_srli_epi64(vh01234567, 32);
}
if (channels & 1) {
*o = (uint16_t) _mm_extract_epi16(vh01234567, 0);
}
channels = 0;
}
} while (channels != 0);
}
}
| 7,973 | 47.621951 | 124 | c |
| XNNPACK | XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7x-minmax-f16c-c24.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/unipass-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
uint16_t* o = (uint16_t*) output;
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
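  // 24 channels per iteration, kept in three 8-wide accumulators (x01234567, x89ABCDEF, xGHIJKLMN).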
for (; channels >= 24; channels -= 24) {
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 8)));
const __m256 vi0xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 16)));
i0 += 24;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 8)));
const __m256 vi1xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 16)));
i1 += 24;
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
__m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i2 + 8)));
__m128i vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(vi0x89ABCDEF, vi1x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i2 + 16)));
__m128i vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(vi0xGHIJKLMN, vi1xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
i2 += 24;
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i3 + 8)));
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i3 + 16)));
i3 += 24;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi2xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i4 + 8)));
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i4 + 16)));
i4 += 24;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi3xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i5 + 8)));
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i5 + 16)));
i5 += 24;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi4xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i6 + 8)));
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i6 + 16)));
i6 += 24;
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi5xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi6xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc89ABCDEF), vscale), _MM_FROUND_TO_NEAREST_INT);
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vscale), _MM_FROUND_TO_NEAREST_INT);
__m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
__m256 vout89ABCDEF = _mm256_max_ps(_mm256_cvtph_ps(vacc89ABCDEF), vmin);
__m256 voutGHIJKLMN = _mm256_max_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vmin);
vout01234567 = _mm256_min_ps(vout01234567, vmax);
vout89ABCDEF = _mm256_min_ps(vout89ABCDEF, vmax);
voutGHIJKLMN = _mm256_min_ps(voutGHIJKLMN, vmax);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vout89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(voutGHIJKLMN, _MM_FROUND_TO_NEAREST_INT));
o += 24;
}
if XNN_UNLIKELY(channels != 0) {
do {
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
__m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
i2 += 8;
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_TO_NEAREST_INT);
__m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
vout01234567 = _mm256_min_ps(vout01234567, vmax);
if XNN_LIKELY(channels >= 8) {
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT));
o += 8;
channels -= 8;
} else {
__m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT);
if (channels & 4) {
_mm_storel_epi64((__m128i*) o, vh01234567);
o += 4;
vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
}
if (channels & 2) {
_mm_storeu_si32(o, vh01234567);
o += 2;
vh01234567 = _mm_srli_epi64(vh01234567, 32);
}
if (channels & 1) {
*o = (uint16_t) _mm_extract_epi16(vh01234567, 0);
}
channels = 0;
}
} while (channels != 0);
}
}
| 9,710 | 52.651934 | 124 | c |
| XNNPACK | XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7x-minmax-f16c-c32.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/unipass-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
uint16_t* o = (uint16_t*) output;
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
for (; channels >= 32; channels -= 32) {
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 8)));
const __m256 vi0xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 16)));
const __m256 vi0xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 24)));
i0 += 32;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 8)));
const __m256 vi1xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 16)));
const __m256 vi1xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 24)));
i1 += 32;
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
__m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i2 + 8)));
__m128i vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(vi0x89ABCDEF, vi1x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i2 + 16)));
__m128i vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(vi0xGHIJKLMN, vi1xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi2xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i2 + 24)));
__m128i vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(vi0xOPQRSTUV, vi1xOPQRSTUV), _MM_FROUND_TO_NEAREST_INT);
i2 += 32;
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i3 + 8)));
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i3 + 16)));
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi2xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i3 + 24)));
i3 += 32;
vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi2xOPQRSTUV), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i4 + 8)));
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i4 + 16)));
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi3xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i4 + 24)));
i4 += 32;
vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi3xOPQRSTUV), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i5 + 8)));
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i5 + 16)));
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi4xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i5 + 24)));
i5 += 32;
vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi4xOPQRSTUV), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i6 + 8)));
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i6 + 16)));
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi5xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i6 + 24)));
i6 += 32;
vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi5xOPQRSTUV), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_TO_NEAREST_INT);
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi6xGHIJKLMN), _MM_FROUND_TO_NEAREST_INT);
vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi6xOPQRSTUV), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_TO_NEAREST_INT);
vacc89ABCDEF = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc89ABCDEF), vscale), _MM_FROUND_TO_NEAREST_INT);
vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vscale), _MM_FROUND_TO_NEAREST_INT);
vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vscale), _MM_FROUND_TO_NEAREST_INT);
__m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
__m256 vout89ABCDEF = _mm256_max_ps(_mm256_cvtph_ps(vacc89ABCDEF), vmin);
__m256 voutGHIJKLMN = _mm256_max_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vmin);
__m256 voutOPQRSTUV = _mm256_max_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vmin);
vout01234567 = _mm256_min_ps(vout01234567, vmax);
vout89ABCDEF = _mm256_min_ps(vout89ABCDEF, vmax);
voutGHIJKLMN = _mm256_min_ps(voutGHIJKLMN, vmax);
voutOPQRSTUV = _mm256_min_ps(voutOPQRSTUV, vmax);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vout89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(voutGHIJKLMN, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(voutOPQRSTUV, _MM_FROUND_TO_NEAREST_INT));
o += 32;
}
if XNN_UNLIKELY(channels != 0) {
do {
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
__m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
i2 += 8;
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_TO_NEAREST_INT);
__m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
vout01234567 = _mm256_min_ps(vout01234567, vmax);
if XNN_LIKELY(channels >= 8) {
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT));
o += 8;
channels -= 8;
} else {
__m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT);
if (channels & 4) {
_mm_storel_epi64((__m128i*) o, vh01234567);
o += 4;
vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
}
if (channels & 2) {
_mm_storeu_si32(o, vh01234567);
o += 2;
vh01234567 = _mm_srli_epi64(vh01234567, 32);
}
if (channels & 1) {
*o = (uint16_t) _mm_extract_epi16(vh01234567, 0);
}
channels = 0;
}
} while (channels != 0);
}
}
| 11,447 | 56.818182 | 124 | c |
| XNNPACK | XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7x-minmax-f16c-c8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/unipass-f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
uint16_t* o = (uint16_t*) output;
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
for (; channels >= 8; channels -= 8) {
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
__m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
i2 += 8;
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_TO_NEAREST_INT);
__m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
vout01234567 = _mm256_min_ps(vout01234567, vmax);
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT));
o += 8;
}
if XNN_UNLIKELY(channels != 0) {
{
const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
__m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_TO_NEAREST_INT);
const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_TO_NEAREST_INT);
vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_TO_NEAREST_INT);
__m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
vout01234567 = _mm256_min_ps(vout01234567, vmax);
__m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_TO_NEAREST_INT);
if (channels & 4) {
_mm_storel_epi64((__m128i*) o, vh01234567);
o += 4;
vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
}
if (channels & 2) {
_mm_storeu_si32(o, vh01234567);
o += 2;
vh01234567 = _mm_srli_epi64(vh01234567, 32);
}
if (channels & 1) {
*o = (uint16_t) _mm_extract_epi16(vh01234567, 0);
}
}
}
}
| 5,855 | 43.030075 | 124 | c |
| XNNPACK | XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7x-minmax-neonfp16arith-c16.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/unipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_f16_gavgpool_minmax_ukernel_7x__neonfp16arith_c16(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
  const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.scale));
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
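  // Main loop: 16 channels per iteration. With native fp16 arithmetic (vaddq_f16) the sums stay
  // in half precision, so no conversion round-trips are needed.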
for (; channels >= 16; channels -= 16) {
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc89ABCDEF = vaddq_f16(vi0x89ABCDEF, vi1x89ABCDEF);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi2x89ABCDEF);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi3x89ABCDEF);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi4x89ABCDEF);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi5x89ABCDEF);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi6x89ABCDEF);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc89ABCDEF = vmulq_f16(vacc89ABCDEF, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc89ABCDEF = vmaxq_f16(vacc89ABCDEF, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output = (uint16_t*) output + 8;
}
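  // Remainder: handle the leftover channels 8 at a time; the final partial vector is written
  // out in 4-, 2-, and 1-element pieces.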
if XNN_UNLIKELY(channels != 0) {
do {
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
if XNN_LIKELY(channels >= 8) {
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
channels -= 8;
} else {
float16x4_t vacc0123 = vget_low_f16(vacc01234567);
if (channels & 4) {
vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4;
vacc0123 = vget_high_f16(vacc01234567);
}
if (channels & 2) {
vst1_lane_u32(output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2;
vacc0123 = vext_f16(vacc0123, vacc0123, 2);
}
if (channels & 1) {
vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 6,328 | 42.951389 | 100 | c |
| XNNPACK | XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7x-minmax-neonfp16arith-c24.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/unipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_f16_gavgpool_minmax_ukernel_7x__neonfp16arith_c24(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
  const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.scale));
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
for (; channels >= 24; channels -= 24) {
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc89ABCDEF = vaddq_f16(vi0x89ABCDEF, vi1x89ABCDEF);
const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vaccGHIJKLMN = vaddq_f16(vi0xGHIJKLMN, vi1xGHIJKLMN);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi2x89ABCDEF);
const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi2xGHIJKLMN);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi3x89ABCDEF);
const float16x8_t vi4xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi3xGHIJKLMN);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi4x89ABCDEF);
const float16x8_t vi5xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi4xGHIJKLMN);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi5x89ABCDEF);
const float16x8_t vi6xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi5xGHIJKLMN);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi6x89ABCDEF);
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi6xGHIJKLMN);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc89ABCDEF = vmulq_f16(vacc89ABCDEF, vscale);
vaccGHIJKLMN = vmulq_f16(vaccGHIJKLMN, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc89ABCDEF = vmaxq_f16(vacc89ABCDEF, vmin);
vaccGHIJKLMN = vmaxq_f16(vaccGHIJKLMN, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vaccGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax);
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output = (uint16_t*) output + 8;
vst1q_u16(output, vreinterpretq_u16_f16(vaccGHIJKLMN)); output = (uint16_t*) output + 8;
}
if XNN_UNLIKELY(channels != 0) {
do {
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
if XNN_LIKELY(channels >= 8) {
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
channels -= 8;
} else {
float16x4_t vacc0123 = vget_low_f16(vacc01234567);
if (channels & 4) {
vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4;
vacc0123 = vget_high_f16(vacc01234567);
}
if (channels & 2) {
vst1_lane_u32(output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2;
vacc0123 = vext_f16(vacc0123, vacc0123, 2);
}
if (channels & 1) {
vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 7,521 | 45.720497 | 100 | c |
| XNNPACK | XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7x-minmax-neonfp16arith-c32.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/unipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_f16_gavgpool_minmax_ukernel_7x__neonfp16arith_c32(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
  const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.scale));
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
for (; channels >= 32; channels -= 32) {
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc89ABCDEF = vaddq_f16(vi0x89ABCDEF, vi1x89ABCDEF);
const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vaccGHIJKLMN = vaddq_f16(vi0xGHIJKLMN, vi1xGHIJKLMN);
const float16x8_t vi2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vaccOPQRSTUV = vaddq_f16(vi0xOPQRSTUV, vi1xOPQRSTUV);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi2x89ABCDEF);
const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi2xGHIJKLMN);
const float16x8_t vi3xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi2xOPQRSTUV);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi3x89ABCDEF);
const float16x8_t vi4xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi3xGHIJKLMN);
const float16x8_t vi4xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi3xOPQRSTUV);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi4x89ABCDEF);
const float16x8_t vi5xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi4xGHIJKLMN);
const float16x8_t vi5xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi4xOPQRSTUV);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi5x89ABCDEF);
const float16x8_t vi6xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi5xGHIJKLMN);
const float16x8_t vi6xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi5xOPQRSTUV);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc89ABCDEF = vaddq_f16(vacc89ABCDEF, vi6x89ABCDEF);
vaccGHIJKLMN = vaddq_f16(vaccGHIJKLMN, vi6xGHIJKLMN);
vaccOPQRSTUV = vaddq_f16(vaccOPQRSTUV, vi6xOPQRSTUV);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc89ABCDEF = vmulq_f16(vacc89ABCDEF, vscale);
vaccGHIJKLMN = vmulq_f16(vaccGHIJKLMN, vscale);
vaccOPQRSTUV = vmulq_f16(vaccOPQRSTUV, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc89ABCDEF = vmaxq_f16(vacc89ABCDEF, vmin);
vaccGHIJKLMN = vmaxq_f16(vaccGHIJKLMN, vmin);
vaccOPQRSTUV = vmaxq_f16(vaccOPQRSTUV, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
vaccGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax);
vaccOPQRSTUV = vminq_f16(vaccOPQRSTUV, vmax);
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output = (uint16_t*) output + 8;
vst1q_u16(output, vreinterpretq_u16_f16(vaccGHIJKLMN)); output = (uint16_t*) output + 8;
vst1q_u16(output, vreinterpretq_u16_f16(vaccOPQRSTUV)); output = (uint16_t*) output + 8;
}
if XNN_UNLIKELY(channels != 0) {
do {
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
if XNN_LIKELY(channels >= 8) {
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
channels -= 8;
} else {
float16x4_t vacc0123 = vget_low_f16(vacc01234567);
if (channels & 4) {
vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4;
vacc0123 = vget_high_f16(vacc01234567);
}
if (channels & 2) {
vst1_lane_u32(output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2;
vacc0123 = vext_f16(vacc0123, vacc0123, 2);
}
if (channels & 1) {
vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 8,714 | 47.960674 | 100 | c |
| XNNPACK | XNNPACK-master/src/f16-gavgpool/gen/f16-gavgpool-7x-minmax-neonfp16arith-c8.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gavgpool/unipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_f16_gavgpool_minmax_ukernel_7x__neonfp16arith_c8(
size_t rows,
size_t channels,
const void* input,
size_t input_stride,
const void* zero,
void* output,
const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const uint16_t* i0 = input;
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = (const uint16_t*) zero;
}
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = (const uint16_t*) zero;
}
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = (const uint16_t*) zero;
}
const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = (const uint16_t*) zero;
}
const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = (const uint16_t*) zero;
}
const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = (const uint16_t*) zero;
}
  const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.scale));
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
for (; channels >= 8; channels -= 8) {
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
}
if XNN_UNLIKELY(channels != 0) {
{
const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);
const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);
vacc01234567 = vmulq_f16(vacc01234567, vscale);
vacc01234567 = vmaxq_f16(vacc01234567, vmin);
vacc01234567 = vminq_f16(vacc01234567, vmax);
float16x4_t vacc0123 = vget_low_f16(vacc01234567);
if (channels & 4) {
vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4;
vacc0123 = vget_high_f16(vacc01234567);
}
if (channels & 2) {
vst1_lane_u32(output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2;
vacc0123 = vext_f16(vacc0123, vacc0123, 2);
}
if (channels & 1) {
vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1;
}
}
}
}
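// --- Editor's illustrative reference (not part of the generated kernel above) ---
// A plain-C sketch of what xnn_f16_gavgpool_minmax_ukernel_7x__neonfp16arith_c8 computes,
// using float as a stand-in for the half-precision values the kernel keeps in uint16_t
// storage. The function name, the float element type, and the element-based stride are
// assumptions made for illustration only; the real kernel takes a byte stride and fp16
// scale/min/max from its params struct.
#include <assert.h>
#include <stddef.h>
static void reference_f16_gavgpool_7x(
    size_t rows, size_t channels,
    const float* input, size_t input_stride /* in elements, not bytes */,
    const float* zero, float* output,
    float scale, float min, float max)
{
  assert(rows != 0);
  assert(rows <= 7);
  for (size_t c = 0; c < channels; c++) {
    float acc = 0.0f;
    for (size_t r = 0; r < 7; r++) {
      // Rows beyond `rows` read from the caller-provided zero buffer, mirroring the
      // i1..i6 pointer clamping at the top of the kernel.
      const float* row = (r < rows) ? input + r * input_stride : zero;
      acc += row[c];
    }
    acc *= scale;                  // scale == 1/rows for global average pooling
    if (acc < min) { acc = min; }  // then clamp to the requested output range
    if (acc > max) { acc = max; }
    output[c] = acc;
  }
}
// --- end of editor's reference ---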
| 4,880 | 39.338843 | 98 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-1x16-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_1x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
size_t k = kc;
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc0x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 8) {
_mm_storeu_si128((__m128i*) c0, vh0x01234567);
vh0x01234567 = _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
c0 += 8;
}
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
c0 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
c0 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
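// --- Editor's illustrative reference (not part of the generated kernel above) ---
// A scalar sketch of one 1x16 output tile as the AVX2 kernel above computes it: the
// packed weights are consumed exactly as the kernel walks `w` (16 bias halfwords, then
// kc groups of 16 weight halfwords), and the accumulator is rounded back to half
// precision after every fused multiply-add, which is what the
// _mm256_cvtph_ps(_mm256_cvtps_ph(...)) round-trip achieves. The helper name is an
// assumption; _cvtsh_ss/_cvtss_sh and fmaf are standard F16C/C99 facilities.
#include <immintrin.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
static void reference_f16_gemm_1x16_tile(
    size_t kc_elements,        // number of k steps (the kernel's kc is in bytes)
    const uint16_t* a,         // 1 x kc activation row, fp16 bits
    const uint16_t* w,         // packed tile: 16 bias values, then kc groups of 16 weights
    uint16_t* c,               // 1 x 16 output tile, fp16 bits
    float min, float max)
{
  for (size_t n = 0; n < 16; n++) {
    float acc = _cvtsh_ss(w[n]);                                     // bias
    for (size_t k = 0; k < kc_elements; k++) {
      const float va = _cvtsh_ss(a[k]);
      const float vb = _cvtsh_ss(w[16 + k * 16 + n]);
      acc = fmaf(va, vb, acc);                                       // fused multiply-add
      acc = _cvtsh_ss(_cvtss_sh(acc, _MM_FROUND_TO_NEAREST_INT));    // round to fp16 each step
    }
    if (acc < min) { acc = min; }
    if (acc > max) { acc = max; }
    c[n] = _cvtss_sh(acc, _MM_FROUND_TO_NEAREST_INT);
  }
}
// --- end of editor's reference ---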
| 3,281 | 29.110092 | 133 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-1x16-minmax-neonfp16arith-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_1x16__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
size_t k = kc;
while (k >= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
#endif
k -= 4 * sizeof(uint16_t);
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0, vb89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
}
const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);
const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
vst1q_u16(c0 + 8, vreinterpretq_u16_f16(vacc0x89ABCDEF));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567)); c0 += 8;
vacc0x01234567 = vacc0x89ABCDEF;
}
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
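// --- Editor's illustrative aside (not part of the generated kernel above) ---
// The "ld64" suffix refers to the 64-bit activation load: vld1_u16(a0) pulls four fp16
// values at once, the K loop is unrolled by 4 with per-lane FMAs (vfmaq_lane_f16 on
// AArch64, vmlaq_lane_f16 otherwise), and any K remainder is handled one element at a
// time via vld1q_dup_u16. A scalar analog of that loop structure, with hypothetical
// names and float standing in for fp16:
#include <stddef.h>
static void reference_k_loop_unrolled_by_4(
    size_t k_elements, const float* a, const float* b, float* acc)
{
  size_t k = k_elements;
  while (k >= 4) {                 // main loop: four activations per iteration
    *acc += a[0] * b[0];
    *acc += a[1] * b[1];
    *acc += a[2] * b[2];
    *acc += a[3] * b[3];
    a += 4; b += 4; k -= 4;
  }
  while (k != 0) {                 // remainder loop: one activation at a time
    *acc += a[0] * b[0];
    a += 1; b += 1; k -= 1;
  }
}
// --- end of editor's aside ---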
| 5,913 | 37.907895 | 132 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-1x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_1x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_TO_NEAREST_INT));
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
c0 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
c0 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
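// --- Editor's illustrative aside (not part of the generated kernel above) ---
// When fewer than 8 output columns remain, the kernel above stores the fp16 results in
// power-of-two chunks (4, then 2, then 1 elements), shifting already-stored lanes out of
// the vector between steps. The same ladder on an ordinary array, with a hypothetical
// helper name:
#include <stddef.h>
#include <stdint.h>
static void reference_store_tail(uint16_t* dst, const uint16_t* src, size_t nc /* 0..7 */)
{
  if (nc & 4) {
    dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3];
    dst += 4; src += 4;
  }
  if (nc & 2) {
    dst[0] = src[0]; dst[1] = src[1];
    dst += 2; src += 2;
  }
  if (nc & 1) {
    dst[0] = src[0];
  }
}
// --- end of editor's aside ---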
| 2,522 | 25.28125 | 133 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-1x8-minmax-neonfp16arith-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_1x8__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
size_t k = kc;
while (k >= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
#endif
k -= 4 * sizeof(uint16_t);
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
k -= sizeof(uint16_t);
} while (k != 0);
}
const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
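// --- Editor's illustrative aside (not part of the generated kernel above) ---
// The packed weight layout this kernel family expects, as inferred from how the code
// above walks `w`: for each block of NR (here 8) output channels, NR bias values come
// first, followed by kc groups of NR weights. The function name, the float element type,
// and the requirement that N be a multiple of NR are assumptions for illustration;
// XNNPACK's real packing helpers work on fp16 bits and pad partial NR blocks.
#include <stddef.h>
static void reference_pack_weights(
    size_t N, size_t K, size_t NR,     // N assumed to be a multiple of NR here
    const float* bias,                 // N biases
    const float* weights,              // K x N, indexed as weights[k * N + n]
    float* packed)                     // (N / NR) * (NR + K * NR) elements
{
  for (size_t n0 = 0; n0 < N; n0 += NR) {
    for (size_t n = 0; n < NR; n++) {
      *packed++ = bias[n0 + n];                  // bias block, loaded once per output tile
    }
    for (size_t k = 0; k < K; k++) {
      for (size_t n = 0; n < NR; n++) {
        *packed++ = weights[k * N + n0 + n];     // one NR-wide group per k step
      }
    }
  }
}
// --- end of editor's aside ---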
| 4,104 | 30.821705 | 132 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-3x16-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_3x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
w = (const uint16_t*) w + 16;
size_t k = kc;
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb01234567, vacc1x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb01234567, vacc2x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc0x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
vacc1x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
vacc2x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c1 + 8), _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c2 + 8), _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
nc -= 16;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 8) {
_mm_storeu_si128((__m128i*) c0, vh0x01234567);
_mm_storeu_si128((__m128i*) c1, vh1x01234567);
_mm_storeu_si128((__m128i*) c2, vh2x01234567);
vh0x01234567 = _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh1x01234567 = _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh2x01234567 = _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
c0 += 8;
c1 += 8;
c2 += 8;
}
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
c0 += 4;
c1 += 4;
c2 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c2, vh2x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
c0 += 2;
c1 += 2;
c2 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
*c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
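// --- Editor's illustrative aside (not part of the generated kernel above) ---
// When the caller has fewer than MR (= 3) rows left, the kernel aliases the surplus row
// pointers onto the previous row (a1 = a0, c1 = c0, ...), so the redundant rows simply
// recompute and rewrite data that is already correct instead of touching memory out of
// bounds. A hypothetical plain-C analog of that pointer setup:
#include <stddef.h>
#include <stdint.h>
static void reference_clamp_row_pointers(
    size_t mr,                                   // 1 <= mr <= 3
    const uint16_t* a0, size_t a_stride,         // strides in bytes, as in the kernel
    uint16_t* c0, size_t cm_stride,
    const uint16_t** a1, uint16_t** c1,
    const uint16_t** a2, uint16_t** c2)
{
  *a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
  *c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
  if (mr < 2) { *a1 = a0; *c1 = c0; }            // row 1 duplicates row 0
  *a2 = (const uint16_t*) ((uintptr_t) *a1 + a_stride);
  *c2 = (uint16_t*) ((uintptr_t) *c1 + cm_stride);
  if (mr <= 2) { *a2 = *a1; *c2 = *c1; }         // row 2 duplicates row 1
}
// --- end of editor's aside ---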
| 6,762 | 38.549708 | 133 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-4x16-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_4x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
w = (const uint16_t*) w + 16;
size_t k = kc;
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb01234567, vacc1x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb01234567, vacc2x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb01234567, vacc3x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc0x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
vacc1x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
vacc2x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
vacc3x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c1 + 8), _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c2 + 8), _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c3 + 8), _mm256_cvtps_ph(vacc3x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
nc -= 16;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 8) {
_mm_storeu_si128((__m128i*) c0, vh0x01234567);
_mm_storeu_si128((__m128i*) c1, vh1x01234567);
_mm_storeu_si128((__m128i*) c2, vh2x01234567);
_mm_storeu_si128((__m128i*) c3, vh3x01234567);
vh0x01234567 = _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh1x01234567 = _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh2x01234567 = _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh3x01234567 = _mm256_cvtps_ph(vacc3x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
c0 += 8;
c1 += 8;
c2 += 8;
c3 += 8;
}
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
c0 += 4;
c1 += 4;
c2 += 4;
c3 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c3, vh3x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
c0 += 2;
c1 += 2;
c2 += 2;
c3 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
*c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
*c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
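// --- Editor's hypothetical driver sketch (not part of the generated kernel above) ---
// This is not the path XNNPACK operators actually take (they dispatch through generated
// ukernel tables); it only illustrates how the kernel covers a whole output. The kernel
// itself walks the full N dimension (the outer do/while, advancing c by cn_stride and
// rewinding a by kc after each 16-wide tile), so a caller tiles only the M dimension in
// steps of at most MR (= 4) rows. The driver name is an assumption; the params type comes
// from the surrounding file's includes, and packed_w uses the bias-then-weights layout
// sketched earlier.
#include <stddef.h>
#include <stdint.h>
static void reference_drive_f16_gemm_4x16(
    size_t M, size_t N, size_t K,
    const uint16_t* a,                  // dense row-major M x K activations, fp16 bits
    const uint16_t* packed_w,           // packed bias + weights for all N columns
    uint16_t* c,                        // dense row-major M x N output, fp16 bits
    const union xnn_f16_minmax_params* params)
{
  const size_t mr = 4;
  for (size_t m = 0; m < M; m += mr) {
    const size_t rows = (M - m) < mr ? (M - m) : mr;
    xnn_f16_gemm_minmax_ukernel_4x16__avx2_broadcast(
        rows, N, K * sizeof(uint16_t),           // kc is in bytes
        a + m * K, K * sizeof(uint16_t),         // a_stride: one activation row, in bytes
        packed_w,
        c + m * N, N * sizeof(uint16_t),         // cm_stride: one output row, in bytes
        16 * sizeof(uint16_t),                   // cn_stride: one 16-wide tile, in bytes
        params);
  }
}
// --- end of editor's sketch ---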
| 8,503 | 41.09901 | 133 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-4x16-minmax-neonfp16arith-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_4x16__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vacc0x01234567;
float16x8_t vacc1x89ABCDEF = vacc0x89ABCDEF;
float16x8_t vacc2x01234567 = vacc0x01234567;
float16x8_t vacc2x89ABCDEF = vacc0x89ABCDEF;
float16x8_t vacc3x01234567 = vacc0x01234567;
float16x8_t vacc3x89ABCDEF = vacc0x89ABCDEF;
size_t k = kc;
while (k >= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
#endif
k -= 4 * sizeof(uint16_t);
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0, vb89ABCDEF);
vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1, vb89ABCDEF);
vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2, vb89ABCDEF);
vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3, vb89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
}
const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = vmaxq_f16(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = vmaxq_f16(vacc2x89ABCDEF, vmin);
vacc3x89ABCDEF = vmaxq_f16(vacc3x89ABCDEF, vmin);
const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = vminq_f16(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = vminq_f16(vacc2x89ABCDEF, vmax);
vacc3x89ABCDEF = vminq_f16(vacc3x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
vst1q_u16(c0 + 8, vreinterpretq_u16_f16(vacc0x89ABCDEF));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
vst1q_u16(c1 + 8, vreinterpretq_u16_f16(vacc1x89ABCDEF));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
vst1q_u16(c2 + 8, vreinterpretq_u16_f16(vacc2x89ABCDEF));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
vst1q_u16(c3 + 8, vreinterpretq_u16_f16(vacc3x89ABCDEF));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
nc -= 16;
} else {
if (nc & 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567)); c0 += 8;
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567)); c1 += 8;
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567)); c2 += 8;
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567)); c3 += 8;
vacc0x01234567 = vacc0x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc3x01234567 = vacc3x89ABCDEF;
}
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc3x0123 = vget_high_f16(vacc3x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
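// --- Editor's illustrative aside (not part of the generated kernel above) ---
// The #if XNN_ARCH_ARM64 blocks above select vfmaq_lane_f16, a fused multiply-add with a
// single rounding per update, while the AArch32 path falls back to vmlaq_lane_f16, which
// multiplies and then adds with two roundings. The scalar analog of that numerical
// difference, using standard C99 <math.h>:
#include <math.h>
static float update_fused(float acc, float a, float b)   { return fmaf(a, b, acc); }  // one rounding
static float update_unfused(float acc, float a, float b) { return acc + a * b; }      // two roundings
// --- end of editor's aside ---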
| 14,279 | 49.28169 | 132 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-4x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_4x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
w = (const uint16_t*) w + 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb01234567, vacc1x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb01234567, vacc2x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb01234567, vacc3x01234567), _MM_FROUND_TO_NEAREST_INT));
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
nc -= 8;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
c0 += 4;
c1 += 4;
c2 += 4;
c3 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c3, vh3x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
c0 += 2;
c1 += 2;
c2 += 2;
c3 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
*c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
*c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,082 | 35.866667 | 133 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-4x8-minmax-neonfp16arith-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_4x8__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vacc0x01234567;
float16x8_t vacc2x01234567 = vacc0x01234567;
float16x8_t vacc3x01234567 = vacc0x01234567;
size_t k = kc;
while (k >= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
#endif
k -= 4 * sizeof(uint16_t);
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
k -= sizeof(uint16_t);
} while (k != 0);
}
const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
nc -= 8;
} else {
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc3x0123 = vget_high_f16(vacc3x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,368 | 42.375 | 132 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-5x16-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_5x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
w = (const uint16_t*) w + 16;
size_t k = kc;
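    // Each k step broadcasts one fp16 element per A row, widens it to fp32, and FMAs it against
    // 16 fp16 weights; the result is rounded back to fp16 so the accumulators behave like native fp16.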
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
a4 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
w = (const uint16_t*) w + 16;
vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb01234567, vacc1x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb01234567, vacc2x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb01234567, vacc3x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc4x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va4, vb01234567, vacc4x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc0x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
vacc1x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
vacc2x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
vacc3x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
vacc4x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va4, vb89ABCDEF, vacc4x89ABCDEF), _MM_FROUND_TO_NEAREST_INT));
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);
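    // Convert the fp32 accumulators back to fp16 and store full 16-column rows; the tail below
    // handles nc < 16 with 8/4/2/1-column stores.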
if XNN_LIKELY(nc >= 16) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c1 + 8), _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c2 + 8), _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c3 + 8), _mm256_cvtps_ph(vacc3x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT));
_mm_storeu_si128((__m128i*) (c4 + 8), _mm256_cvtps_ph(vacc4x89ABCDEF, _MM_FROUND_TO_NEAREST_INT));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
nc -= 16;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 8) {
_mm_storeu_si128((__m128i*) c0, vh0x01234567);
_mm_storeu_si128((__m128i*) c1, vh1x01234567);
_mm_storeu_si128((__m128i*) c2, vh2x01234567);
_mm_storeu_si128((__m128i*) c3, vh3x01234567);
_mm_storeu_si128((__m128i*) c4, vh4x01234567);
vh0x01234567 = _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh1x01234567 = _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh2x01234567 = _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh3x01234567 = _mm256_cvtps_ph(vacc3x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
vh4x01234567 = _mm256_cvtps_ph(vacc4x89ABCDEF, _MM_FROUND_TO_NEAREST_INT);
c0 += 8;
c1 += 8;
c2 += 8;
c3 += 8;
c4 += 8;
}
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
_mm_storel_epi64((__m128i*) c4, vh4x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
c0 += 4;
c1 += 4;
c2 += 4;
c3 += 4;
c4 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c3, vh3x01234567);
_mm_storeu_si32(c4, vh4x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
c0 += 2;
c1 += 2;
c2 += 2;
c3 += 2;
c4 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
*c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
*c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
*c4 = (uint16_t) _mm_extract_epi16(vh4x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,243 | 42.965665 | 133 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-5x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_5x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
w = (const uint16_t*) w + 8;
size_t k = kc;
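    // k loop: widen one fp16 element per A row to fp32, FMA against 8 fp16 weights, and round the
    // accumulator back to fp16 every iteration.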
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
a4 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb01234567, vacc1x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb01234567, vacc2x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb01234567, vacc3x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc4x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va4, vb01234567, vacc4x01234567), _MM_FROUND_TO_NEAREST_INT));
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
nc -= 8;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
_mm_storel_epi64((__m128i*) c4, vh4x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
c0 += 4;
c1 += 4;
c2 += 4;
c3 += 4;
c4 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c3, vh3x01234567);
_mm_storeu_si32(c4, vh4x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
c0 += 2;
c1 += 2;
c2 += 2;
c3 += 2;
c4 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
*c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
*c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
*c4 = (uint16_t) _mm_extract_epi16(vh4x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,268 | 37.664894 | 133 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-6x16-minmax-neonfp16arith-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_6x16__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const uint16_t* a5 = (const uint16_t*) ((uintptr_t) a4 + a_stride);
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vacc0x01234567;
float16x8_t vacc1x89ABCDEF = vacc0x89ABCDEF;
float16x8_t vacc2x01234567 = vacc0x01234567;
float16x8_t vacc2x89ABCDEF = vacc0x89ABCDEF;
float16x8_t vacc3x01234567 = vacc0x01234567;
float16x8_t vacc3x89ABCDEF = vacc0x89ABCDEF;
float16x8_t vacc4x01234567 = vacc0x01234567;
float16x8_t vacc4x89ABCDEF = vacc0x89ABCDEF;
float16x8_t vacc5x01234567 = vacc0x01234567;
float16x8_t vacc5x89ABCDEF = vacc0x89ABCDEF;
size_t k = kc;
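    // Main loop: consume 4 fp16 A values per row at a time (64-bit loads), accumulating with
    // lane-indexed FMA on AArch64 and MLA elsewhere.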
while (k >= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x4_t va4 = vreinterpret_f16_u16(vld1_u16(a4)); a4 += 4;
const float16x4_t va5 = vreinterpret_f16_u16(vld1_u16(a5)); a5 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc0, va4, 0);
vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc0, va5, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
vacc4x89ABCDEF = vmlaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc0, va4, 0);
vacc5x89ABCDEF = vmlaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc0, va5, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc1, va4, 1);
vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc1, va5, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
vacc4x89ABCDEF = vmlaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc1, va4, 1);
vacc5x89ABCDEF = vmlaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc1, va5, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc2, va4, 2);
vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc2, va5, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
vacc4x89ABCDEF = vmlaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc2, va4, 2);
vacc5x89ABCDEF = vmlaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc2, va5, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc3, va4, 3);
vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc3, va5, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
vacc4x89ABCDEF = vmlaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc3, va4, 3);
vacc5x89ABCDEF = vmlaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc3, va5, 3);
#endif
k -= 4 * sizeof(uint16_t);
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t va4 = vreinterpretq_f16_u16(vld1q_dup_u16(a4)); a4 += 1;
const float16x8_t va5 = vreinterpretq_f16_u16(vld1q_dup_u16(a5)); a5 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0, vb89ABCDEF);
vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1, vb89ABCDEF);
vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2, vb89ABCDEF);
vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3, vb89ABCDEF);
vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4, vb89ABCDEF);
vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5, vb89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
}
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = vmaxq_f16(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = vmaxq_f16(vacc2x89ABCDEF, vmin);
vacc3x89ABCDEF = vmaxq_f16(vacc3x89ABCDEF, vmin);
vacc4x89ABCDEF = vmaxq_f16(vacc4x89ABCDEF, vmin);
vacc5x89ABCDEF = vmaxq_f16(vacc5x89ABCDEF, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = vminq_f16(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = vminq_f16(vacc2x89ABCDEF, vmax);
vacc3x89ABCDEF = vminq_f16(vacc3x89ABCDEF, vmax);
vacc4x89ABCDEF = vminq_f16(vacc4x89ABCDEF, vmax);
vacc5x89ABCDEF = vminq_f16(vacc5x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
vst1q_u16(c0 + 8, vreinterpretq_u16_f16(vacc0x89ABCDEF));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
vst1q_u16(c1 + 8, vreinterpretq_u16_f16(vacc1x89ABCDEF));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
vst1q_u16(c2 + 8, vreinterpretq_u16_f16(vacc2x89ABCDEF));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
vst1q_u16(c3 + 8, vreinterpretq_u16_f16(vacc3x89ABCDEF));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
vst1q_u16(c4, vreinterpretq_u16_f16(vacc4x01234567));
vst1q_u16(c4 + 8, vreinterpretq_u16_f16(vacc4x89ABCDEF));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
vst1q_u16(c5, vreinterpretq_u16_f16(vacc5x01234567));
vst1q_u16(c5 + 8, vreinterpretq_u16_f16(vacc5x89ABCDEF));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
a5 = (const uint16_t*) ((uintptr_t) a5 - kc);
nc -= 16;
} else {
if (nc & 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567)); c0 += 8;
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567)); c1 += 8;
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567)); c2 += 8;
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567)); c3 += 8;
vst1q_u16(c4, vreinterpretq_u16_f16(vacc4x01234567)); c4 += 8;
vst1q_u16(c5, vreinterpretq_u16_f16(vacc5x01234567)); c5 += 8;
vacc0x01234567 = vacc0x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc3x01234567 = vacc3x89ABCDEF;
vacc4x01234567 = vacc4x89ABCDEF;
vacc5x01234567 = vacc5x89ABCDEF;
}
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vst1_u16(c4, vreinterpret_u16_f16(vacc4x0123)); c4 += 4;
vst1_u16(c5, vreinterpret_u16_f16(vacc5x0123)); c5 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc3x0123 = vget_high_f16(vacc3x01234567);
vacc4x0123 = vget_high_f16(vacc4x01234567);
vacc5x0123 = vget_high_f16(vacc5x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
vst1_lane_u16(c4, vreinterpret_u16_f16(vacc4x0123), 0);
vst1_lane_u16(c5, vreinterpret_u16_f16(vacc5x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 19,856 | 52.379032 | 132 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-6x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_6x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const uint16_t* a5 = (const uint16_t*) ((uintptr_t) a4 + a_stride);
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc5x01234567 = vacc0x01234567;
w = (const uint16_t*) w + 8;
size_t k = kc;
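    // k loop: each iteration broadcasts one fp16 value per row, does the FMA in fp32, and rounds
    // the accumulator back to fp16 precision.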
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
a4 += 1;
const __m256 va5 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a5));
a5 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb01234567, vacc1x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb01234567, vacc2x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb01234567, vacc3x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc4x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va4, vb01234567, vacc4x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc5x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va5, vb01234567, vacc5x01234567), _MM_FROUND_TO_NEAREST_INT));
k -= sizeof(uint16_t);
} while (k != 0);
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_si128((__m128i*) c5, _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_TO_NEAREST_INT));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
a5 = (const uint16_t*) ((uintptr_t) a5 - kc);
nc -= 8;
} else {
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5x01234567 = _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
_mm_storel_epi64((__m128i*) c4, vh4x01234567);
_mm_storel_epi64((__m128i*) c5, vh5x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
vh5x01234567 = _mm_unpackhi_epi64(vh5x01234567, vh5x01234567);
c0 += 4;
c1 += 4;
c2 += 4;
c3 += 4;
c4 += 4;
c5 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c3, vh3x01234567);
_mm_storeu_si32(c4, vh4x01234567);
_mm_storeu_si32(c5, vh5x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
vh5x01234567 = _mm_srli_epi64(vh5x01234567, 32);
c0 += 2;
c1 += 2;
c2 += 2;
c3 += 2;
c4 += 2;
c5 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
*c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
*c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
*c4 = (uint16_t) _mm_extract_epi16(vh4x01234567, 0);
*c5 = (uint16_t) _mm_extract_epi16(vh5x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,455 | 39.075829 | 133 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-6x8-minmax-neonfp16arith-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_6x8__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const uint16_t* a5 = (const uint16_t*) ((uintptr_t) a4 + a_stride);
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vacc0x01234567;
float16x8_t vacc2x01234567 = vacc0x01234567;
float16x8_t vacc3x01234567 = vacc0x01234567;
float16x8_t vacc4x01234567 = vacc0x01234567;
float16x8_t vacc5x01234567 = vacc0x01234567;
size_t k = kc;
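    // Main loop over 4 A values per row per iteration; uses vfmaq_lane_f16 on AArch64 and
    // vmlaq_lane_f16 otherwise.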
while (k >= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x4_t va4 = vreinterpret_f16_u16(vld1_u16(a4)); a4 += 4;
const float16x4_t va5 = vreinterpret_f16_u16(vld1_u16(a5)); a5 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
#endif
k -= 4 * sizeof(uint16_t);
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t va4 = vreinterpretq_f16_u16(vld1q_dup_u16(a4)); a4 += 1;
const float16x8_t va5 = vreinterpretq_f16_u16(vld1q_dup_u16(a5)); a5 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
k -= sizeof(uint16_t);
} while (k != 0);
}
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
vst1q_u16(c4, vreinterpretq_u16_f16(vacc4x01234567));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
vst1q_u16(c5, vreinterpretq_u16_f16(vacc5x01234567));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
a5 = (const uint16_t*) ((uintptr_t) a5 - kc);
nc -= 8;
} else {
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vst1_u16(c4, vreinterpret_u16_f16(vacc4x0123)); c4 += 4;
vst1_u16(c5, vreinterpret_u16_f16(vacc5x0123)); c5 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc3x0123 = vget_high_f16(vacc3x01234567);
vacc4x0123 = vget_high_f16(vacc4x01234567);
vacc5x0123 = vget_high_f16(vacc5x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
vst1_lane_u16(c4, vreinterpret_u16_f16(vacc4x0123), 0);
vst1_lane_u16(c5, vreinterpret_u16_f16(vacc5x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,877 | 46 | 132 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-7x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/avx2-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_7x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 7);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = a;
uint16_t* c0 = c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const uint16_t* a5 = (const uint16_t*) ((uintptr_t) a4 + a_stride);
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
a5 = a4;
c5 = c4;
}
const uint16_t* a6 = (const uint16_t*) ((uintptr_t) a5 + a_stride);
uint16_t* c6 = (uint16_t*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
a6 = a5;
c6 = c5;
}
do {
__m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc5x01234567 = vacc0x01234567;
__m256 vacc6x01234567 = vacc0x01234567;
w = (const uint16_t*) w + 8;
size_t k = kc;
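    // k loop: broadcast-load one fp16 value per A row, FMA in fp32 against 8 weights, and round
    // each accumulator back to fp16.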
do {
const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
a0 += 1;
const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
a1 += 1;
const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
a2 += 1;
const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
a3 += 1;
const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
a4 += 1;
const __m256 va5 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a5));
a5 += 1;
const __m256 va6 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a6));
a6 += 1;
const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
w = (const uint16_t*) w + 8;
vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb01234567, vacc1x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb01234567, vacc2x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb01234567, vacc3x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc4x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va4, vb01234567, vacc4x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc5x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va5, vb01234567, vacc5x01234567), _MM_FROUND_TO_NEAREST_INT));
vacc6x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va6, vb01234567, vacc6x01234567), _MM_FROUND_TO_NEAREST_INT));
k -= sizeof(uint16_t);
} while (k != 0);
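    // Apply the fused min/max clamp to every accumulator row.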
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);
vacc6x01234567 = _mm256_max_ps(vacc6x01234567, vmin);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);
vacc6x01234567 = _mm256_min_ps(vacc6x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
_mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_si128((__m128i*) c5, _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_TO_NEAREST_INT));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
_mm_storeu_si128((__m128i*) c6, _mm256_cvtps_ph(vacc6x01234567, _MM_FROUND_TO_NEAREST_INT));
c6 = (uint16_t*) ((uintptr_t) c6 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
a5 = (const uint16_t*) ((uintptr_t) a5 - kc);
a6 = (const uint16_t*) ((uintptr_t) a6 - kc);
nc -= 8;
} else {
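      // Fewer than 8 columns remain: convert each row to fp16 once, then
      // store 4, 2, and finally 1 column, shifting the vector down after
      // each partial store.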
__m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh5x01234567 = _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_TO_NEAREST_INT);
__m128i vh6x01234567 = _mm256_cvtps_ph(vacc6x01234567, _MM_FROUND_TO_NEAREST_INT);
if (nc & 4) {
_mm_storel_epi64((__m128i*) c0, vh0x01234567);
_mm_storel_epi64((__m128i*) c1, vh1x01234567);
_mm_storel_epi64((__m128i*) c2, vh2x01234567);
_mm_storel_epi64((__m128i*) c3, vh3x01234567);
_mm_storel_epi64((__m128i*) c4, vh4x01234567);
_mm_storel_epi64((__m128i*) c5, vh5x01234567);
_mm_storel_epi64((__m128i*) c6, vh6x01234567);
vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
vh5x01234567 = _mm_unpackhi_epi64(vh5x01234567, vh5x01234567);
vh6x01234567 = _mm_unpackhi_epi64(vh6x01234567, vh6x01234567);
c0 += 4;
c1 += 4;
c2 += 4;
c3 += 4;
c4 += 4;
c5 += 4;
c6 += 4;
}
if (nc & 2) {
_mm_storeu_si32(c0, vh0x01234567);
_mm_storeu_si32(c1, vh1x01234567);
_mm_storeu_si32(c2, vh2x01234567);
_mm_storeu_si32(c3, vh3x01234567);
_mm_storeu_si32(c4, vh4x01234567);
_mm_storeu_si32(c5, vh5x01234567);
_mm_storeu_si32(c6, vh6x01234567);
vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
vh5x01234567 = _mm_srli_epi64(vh5x01234567, 32);
vh6x01234567 = _mm_srli_epi64(vh6x01234567, 32);
c0 += 2;
c1 += 2;
c2 += 2;
c3 += 2;
c4 += 2;
c5 += 2;
c6 += 2;
}
if (nc & 1) {
*c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
*c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
*c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
*c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
*c4 = (uint16_t) _mm_extract_epi16(vh4x01234567, 0);
*c5 = (uint16_t) _mm_extract_epi16(vh5x01234567, 0);
*c6 = (uint16_t) _mm_extract_epi16(vh6x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
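// Illustrative sketch only (not part of XNNPACK; the function name and the use
// of plain float are invented for clarity): the scalar computation that the
// 7x8 broadcast microkernel above performs. It assumes the packed-weight
// layout the loads above imply -- 8 bias values followed by one 8-wide row of
// B per k step -- and it ignores the intermediate fp16 rounding the kernel
// applies after every FMA.
#include <stddef.h>  // for size_t

static void f16_gemm_7x8_reference_sketch(
    size_t mr,              // number of output rows, at most 7
    size_t nc,              // number of output columns, at most 8
    size_t kc,              // reduction length, in elements
    const float* a,         // activations, mr rows of kc elements
    size_t a_stride,        // stride between A rows, in elements
    const float* w,         // packed weights: w[0..7] = bias, then kc blocks of 8
    float* c,               // outputs, mr rows
    size_t cm_stride,       // stride between C rows, in elements
    float min, float max)   // fused clamp range
{
  for (size_t i = 0; i < mr; i++) {
    for (size_t j = 0; j < nc; j++) {
      float acc = w[j];  // bias for output column j
      for (size_t k = 0; k < kc; k++) {
        acc += a[i * a_stride + k] * w[8 + 8 * k + j];
      }
      // Fused clamp, equivalent to the kernel's max-then-min sequence.
      acc = acc > min ? acc : min;
      acc = acc < max ? acc : max;
      c[i * cm_stride + j] = acc;
    }
  }
}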
| 9,641 | 40.205128 | 133 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemm-8x8-minmax-neonfp16arith-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemm_minmax_ukernel_8x8__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 8);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const uint16_t* a5 = (const uint16_t*) ((uintptr_t) a4 + a_stride);
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
a5 = a4;
c5 = c4;
}
const uint16_t* a6 = (const uint16_t*) ((uintptr_t) a5 + a_stride);
uint16_t* c6 = (uint16_t*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
a6 = a5;
c6 = c5;
}
const uint16_t* a7 = (const uint16_t*) ((uintptr_t) a6 + a_stride);
uint16_t* c7 = (uint16_t*) ((uintptr_t) c6 + cm_stride);
if XNN_UNPREDICTABLE(mr != 8) {
a7 = a6;
c7 = c6;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vacc0x01234567;
float16x8_t vacc2x01234567 = vacc0x01234567;
float16x8_t vacc3x01234567 = vacc0x01234567;
float16x8_t vacc4x01234567 = vacc0x01234567;
float16x8_t vacc5x01234567 = vacc0x01234567;
float16x8_t vacc6x01234567 = vacc0x01234567;
float16x8_t vacc7x01234567 = vacc0x01234567;
size_t k = kc;
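    // Main loop: load 4 fp16 values (64 bits) from each A row per iteration
    // and issue one lane-broadcast multiply-accumulate per k step against a
    // full 8-wide row of B.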
while (k >= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x4_t va4 = vreinterpret_f16_u16(vld1_u16(a4)); a4 += 4;
const float16x4_t va5 = vreinterpret_f16_u16(vld1_u16(a5)); a5 += 4;
const float16x4_t va6 = vreinterpret_f16_u16(vld1_u16(a6)); a6 += 4;
const float16x4_t va7 = vreinterpret_f16_u16(vld1_u16(a7)); a7 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
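      // AArch64 provides the fused vfmaq_lane_f16; 32-bit ARM builds fall
      // back to the separate multiply-add vmlaq_lane_f16.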
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c0, va6, 0);
vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567c0, va6, 0);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c1, va6, 1);
vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567c1, va6, 1);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c2, va6, 2);
vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567c2, va6, 2);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c3, va6, 3);
vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c3, va7, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567c3, va6, 3);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567c3, va7, 3);
#endif
k -= 4 * sizeof(uint16_t);
}
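    // Remainder loop: handle the last 1-3 k elements when kc is not a
    // multiple of 4.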
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t va4 = vreinterpretq_f16_u16(vld1q_dup_u16(a4)); a4 += 1;
const float16x8_t va5 = vreinterpretq_f16_u16(vld1q_dup_u16(a5)); a5 += 1;
const float16x8_t va6 = vreinterpretq_f16_u16(vld1q_dup_u16(a6)); a6 += 1;
const float16x8_t va7 = vreinterpretq_f16_u16(vld1q_dup_u16(a7)); a7 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6, vb01234567);
vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7, vb01234567);
k -= sizeof(uint16_t);
} while (k != 0);
}
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
vacc6x01234567 = vmaxq_f16(vacc6x01234567, vmin);
vacc7x01234567 = vmaxq_f16(vacc7x01234567, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
vacc6x01234567 = vminq_f16(vacc6x01234567, vmax);
vacc7x01234567 = vminq_f16(vacc7x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
vst1q_u16(c4, vreinterpretq_u16_f16(vacc4x01234567));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
vst1q_u16(c5, vreinterpretq_u16_f16(vacc5x01234567));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
vst1q_u16(c6, vreinterpretq_u16_f16(vacc6x01234567));
c6 = (uint16_t*) ((uintptr_t) c6 + cn_stride);
vst1q_u16(c7, vreinterpretq_u16_f16(vacc7x01234567));
c7 = (uint16_t*) ((uintptr_t) c7 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
a5 = (const uint16_t*) ((uintptr_t) a5 - kc);
a6 = (const uint16_t*) ((uintptr_t) a6 - kc);
a7 = (const uint16_t*) ((uintptr_t) a7 - kc);
nc -= 8;
} else {
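      // Partial tile: work on 4-lane halves, storing 4, 2, then 1 column and
      // shifting the remaining lanes down after each partial store.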
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
float16x4_t vacc6x0123 = vget_low_f16(vacc6x01234567);
float16x4_t vacc7x0123 = vget_low_f16(vacc7x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vst1_u16(c4, vreinterpret_u16_f16(vacc4x0123)); c4 += 4;
vst1_u16(c5, vreinterpret_u16_f16(vacc5x0123)); c5 += 4;
vst1_u16(c6, vreinterpret_u16_f16(vacc6x0123)); c6 += 4;
vst1_u16(c7, vreinterpret_u16_f16(vacc7x0123)); c7 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc3x0123 = vget_high_f16(vacc3x01234567);
vacc4x0123 = vget_high_f16(vacc4x01234567);
vacc5x0123 = vget_high_f16(vacc5x01234567);
vacc6x0123 = vget_high_f16(vacc6x01234567);
vacc7x0123 = vget_high_f16(vacc7x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
vst1_lane_u32((void*) c6, vreinterpret_u32_f16(vacc6x0123), 0); c6 += 2;
vst1_lane_u32((void*) c7, vreinterpret_u32_f16(vacc7x0123), 0); c7 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
vacc6x0123 = vext_f16(vacc6x0123, vacc6x0123, 2);
vacc7x0123 = vext_f16(vacc7x0123, vacc7x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
vst1_lane_u16(c4, vreinterpret_u16_f16(vacc4x0123), 0);
vst1_lane_u16(c5, vreinterpret_u16_f16(vacc5x0123), 0);
vst1_lane_u16(c6, vreinterpret_u16_f16(vacc6x0123), 0);
vst1_lane_u16(c7, vreinterpret_u16_f16(vacc7x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 16,386 | 48.358434 | 132 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemminc-1x16-minmax-neonfp16arith-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemminc_minmax_ukernel_1x16__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const void* restrict acc,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
do {
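    // gemminc variant: the accumulators are seeded from the caller-provided
    // acc buffer instead of the packed bias, so a long reduction can be
    // accumulated over several passes.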
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
size_t k = kc;
while (k >= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
#endif
k -= 4 * sizeof(uint16_t);
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0, vb89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
}
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
vst1q_u16(c0 + 8, vreinterpretq_u16_f16(vacc0x89ABCDEF));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
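      // 16-wide tail: store the low 8 columns first, then reuse the 4/2/1
      // column handling below on the upper half.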
if (nc & 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567)); c0 += 8;
vacc0x01234567 = vacc0x89ABCDEF;
}
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,981 | 37.844156 | 132 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemminc-1x8-minmax-neonfp16arith-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemminc_minmax_ukernel_1x8__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const void* restrict acc,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
size_t k = kc;
while (k >= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
#endif
k -= 4 * sizeof(uint16_t);
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
k -= sizeof(uint16_t);
} while (k != 0);
}
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,166 | 30.80916 | 132 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemminc-4x16-minmax-neonfp16arith-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemminc_minmax_ukernel_4x16__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const void* restrict acc,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc2x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc3x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
size_t k = kc;
while (k >= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
#endif
k -= 4 * sizeof(uint16_t);
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0, vb89ABCDEF);
vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1, vb89ABCDEF);
vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2, vb89ABCDEF);
vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3, vb89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
}
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = vmaxq_f16(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = vmaxq_f16(vacc2x89ABCDEF, vmin);
vacc3x89ABCDEF = vmaxq_f16(vacc3x89ABCDEF, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = vminq_f16(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = vminq_f16(vacc2x89ABCDEF, vmax);
vacc3x89ABCDEF = vminq_f16(vacc3x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
vst1q_u16(c0 + 8, vreinterpretq_u16_f16(vacc0x89ABCDEF));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
vst1q_u16(c1 + 8, vreinterpretq_u16_f16(vacc1x89ABCDEF));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
vst1q_u16(c2 + 8, vreinterpretq_u16_f16(vacc2x89ABCDEF));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
vst1q_u16(c3 + 8, vreinterpretq_u16_f16(vacc3x89ABCDEF));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
nc -= 16;
} else {
if (nc & 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567)); c0 += 8;
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567)); c1 += 8;
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567)); c2 += 8;
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567)); c3 += 8;
vacc0x01234567 = vacc0x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc3x01234567 = vacc3x89ABCDEF;
}
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc3x0123 = vget_high_f16(vacc3x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,851 | 50.93007 | 132 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemminc-4x8-minmax-neonfp16arith-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemminc_minmax_ukernel_4x8__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const void* restrict acc,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc2x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc3x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
size_t k = kc;
while (k >= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
#endif
k -= 4 * sizeof(uint16_t);
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
k -= sizeof(uint16_t);
} while (k != 0);
}
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
nc -= 8;
} else {
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc3x0123 = vget_high_f16(vacc3x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,682 | 43.417431 | 132 | c |
| XNNPACK | XNNPACK-master/src/f16-gemm/gen/f16-gemminc-6x16-minmax-neonfp16arith-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemminc_minmax_ukernel_6x16__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const void* restrict acc,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const uint16_t* a5 = (const uint16_t*) ((uintptr_t) a4 + a_stride);
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc2x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc3x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc4x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc5x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
size_t k = kc;
while (k >= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x4_t va4 = vreinterpret_f16_u16(vld1_u16(a4)); a4 += 4;
const float16x4_t va5 = vreinterpret_f16_u16(vld1_u16(a5)); a5 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc0, va4, 0);
vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc0, va5, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
vacc4x89ABCDEF = vmlaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc0, va4, 0);
vacc5x89ABCDEF = vmlaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc0, va5, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc1, va4, 1);
vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc1, va5, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
vacc4x89ABCDEF = vmlaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc1, va4, 1);
vacc5x89ABCDEF = vmlaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc1, va5, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc2, va4, 2);
vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc2, va5, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
vacc4x89ABCDEF = vmlaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc2, va4, 2);
vacc5x89ABCDEF = vmlaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc2, va5, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc3, va4, 3);
vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc3, va5, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
vacc4x89ABCDEF = vmlaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc3, va4, 3);
vacc5x89ABCDEF = vmlaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc3, va5, 3);
#endif
k -= 4 * sizeof(uint16_t);
}
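    // Remainder loop for kc not divisible by 4: consume one k-element at a
    // time, broadcasting each a value across all lanes before the full-width
    // multiply-add.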
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t va4 = vreinterpretq_f16_u16(vld1q_dup_u16(a4)); a4 += 1;
const float16x8_t va5 = vreinterpretq_f16_u16(vld1q_dup_u16(a5)); a5 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0, vb89ABCDEF);
vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1, vb89ABCDEF);
vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2, vb89ABCDEF);
vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3, vb89ABCDEF);
vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4, vb89ABCDEF);
vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5, vb89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
}
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = vmaxq_f16(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = vmaxq_f16(vacc2x89ABCDEF, vmin);
vacc3x89ABCDEF = vmaxq_f16(vacc3x89ABCDEF, vmin);
vacc4x89ABCDEF = vmaxq_f16(vacc4x89ABCDEF, vmin);
vacc5x89ABCDEF = vmaxq_f16(vacc5x89ABCDEF, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = vminq_f16(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = vminq_f16(vacc2x89ABCDEF, vmax);
vacc3x89ABCDEF = vminq_f16(vacc3x89ABCDEF, vmax);
vacc4x89ABCDEF = vminq_f16(vacc4x89ABCDEF, vmax);
vacc5x89ABCDEF = vminq_f16(vacc5x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
vst1q_u16(c0 + 8, vreinterpretq_u16_f16(vacc0x89ABCDEF));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
vst1q_u16(c1 + 8, vreinterpretq_u16_f16(vacc1x89ABCDEF));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
vst1q_u16(c2 + 8, vreinterpretq_u16_f16(vacc2x89ABCDEF));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
vst1q_u16(c3 + 8, vreinterpretq_u16_f16(vacc3x89ABCDEF));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
vst1q_u16(c4, vreinterpretq_u16_f16(vacc4x01234567));
vst1q_u16(c4 + 8, vreinterpretq_u16_f16(vacc4x89ABCDEF));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
vst1q_u16(c5, vreinterpretq_u16_f16(vacc5x01234567));
vst1q_u16(c5 + 8, vreinterpretq_u16_f16(vacc5x89ABCDEF));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
a5 = (const uint16_t*) ((uintptr_t) a5 - kc);
nc -= 16;
} else {
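      // Partial-tile store for nc < 16: write the remaining columns by binary
      // decomposition (8, 4, 2, 1), shifting the surviving lanes down into the
      // low registers after each store.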
if (nc & 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567)); c0 += 8;
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567)); c1 += 8;
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567)); c2 += 8;
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567)); c3 += 8;
vst1q_u16(c4, vreinterpretq_u16_f16(vacc4x01234567)); c4 += 8;
vst1q_u16(c5, vreinterpretq_u16_f16(vacc5x01234567)); c5 += 8;
vacc0x01234567 = vacc0x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc3x01234567 = vacc3x89ABCDEF;
vacc4x01234567 = vacc4x89ABCDEF;
vacc5x01234567 = vacc5x89ABCDEF;
}
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vst1_u16(c4, vreinterpret_u16_f16(vacc4x0123)); c4 += 4;
vst1_u16(c5, vreinterpret_u16_f16(vacc5x0123)); c5 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc3x0123 = vget_high_f16(vacc3x01234567);
vacc4x0123 = vget_high_f16(vacc4x01234567);
vacc5x0123 = vget_high_f16(vacc5x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
vst1_lane_u16(c4, vreinterpret_u16_f16(vacc4x0123), 0);
vst1_lane_u16(c5, vreinterpret_u16_f16(vacc5x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 20,764 | 54.52139 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-gemm/gen/f16-gemminc-6x8-minmax-neonfp16arith-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemminc_minmax_ukernel_6x8__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const void* restrict acc,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
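  // GEMMINC variant: initial accumulators are read from the caller-provided
  // acc buffer below; w is consumed only for the packed B panel.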
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const uint16_t* a5 = (const uint16_t*) ((uintptr_t) a4 + a_stride);
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc2x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc3x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc4x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc5x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
size_t k = kc;
while (k >= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x4_t va4 = vreinterpret_f16_u16(vld1_u16(a4)); a4 += 4;
const float16x4_t va5 = vreinterpret_f16_u16(vld1_u16(a5)); a5 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
#endif
k -= 4 * sizeof(uint16_t);
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t va4 = vreinterpretq_f16_u16(vld1q_dup_u16(a4)); a4 += 1;
const float16x8_t va5 = vreinterpretq_f16_u16(vld1q_dup_u16(a5)); a5 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
k -= sizeof(uint16_t);
} while (k != 0);
}
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
vst1q_u16(c4, vreinterpretq_u16_f16(vacc4x01234567));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
vst1q_u16(c5, vreinterpretq_u16_f16(vacc5x01234567));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
a5 = (const uint16_t*) ((uintptr_t) a5 - kc);
nc -= 8;
} else {
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vst1_u16(c4, vreinterpret_u16_f16(vacc4x0123)); c4 += 4;
vst1_u16(c5, vreinterpret_u16_f16(vacc5x0123)); c5 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc3x0123 = vget_high_f16(vacc3x01234567);
vacc4x0123 = vget_high_f16(vacc4x01234567);
vacc5x0123 = vget_high_f16(vacc5x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
vst1_lane_u16(c4, vreinterpret_u16_f16(vacc4x0123), 0);
vst1_lane_u16(c5, vreinterpret_u16_f16(vacc5x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,359 | 47.405797 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-gemm/gen/f16-gemminc-8x8-minmax-neonfp16arith-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_gemminc_minmax_ukernel_8x8__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const void* restrict acc,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 8);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
assert(acc != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const uint16_t* a5 = (const uint16_t*) ((uintptr_t) a4 + a_stride);
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
a5 = a4;
c5 = c4;
}
const uint16_t* a6 = (const uint16_t*) ((uintptr_t) a5 + a_stride);
uint16_t* c6 = (uint16_t*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
a6 = a5;
c6 = c5;
}
const uint16_t* a7 = (const uint16_t*) ((uintptr_t) a6 + a_stride);
uint16_t* c7 = (uint16_t*) ((uintptr_t) c6 + cm_stride);
if XNN_UNPREDICTABLE(mr != 8) {
a7 = a6;
c7 = c6;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc2x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc3x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc4x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc5x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc6x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
float16x8_t vacc7x01234567 = vreinterpretq_f16_u16(vld1q_u16(acc)); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
size_t k = kc;
while (k >= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x4_t va4 = vreinterpret_f16_u16(vld1_u16(a4)); a4 += 4;
const float16x4_t va5 = vreinterpret_f16_u16(vld1_u16(a5)); a5 += 4;
const float16x4_t va6 = vreinterpret_f16_u16(vld1_u16(a6)); a6 += 4;
const float16x4_t va7 = vreinterpret_f16_u16(vld1_u16(a7)); a7 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c0, va6, 0);
vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567c0, va6, 0);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c1, va6, 1);
vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567c1, va6, 1);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c2, va6, 2);
vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567c2, va6, 2);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c3, va6, 3);
vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c3, va7, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567c3, va6, 3);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567c3, va7, 3);
#endif
k -= 4 * sizeof(uint16_t);
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t va4 = vreinterpretq_f16_u16(vld1q_dup_u16(a4)); a4 += 1;
const float16x8_t va5 = vreinterpretq_f16_u16(vld1q_dup_u16(a5)); a5 += 1;
const float16x8_t va6 = vreinterpretq_f16_u16(vld1q_dup_u16(a6)); a6 += 1;
const float16x8_t va7 = vreinterpretq_f16_u16(vld1q_dup_u16(a7)); a7 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6, vb01234567);
vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7, vb01234567);
k -= sizeof(uint16_t);
} while (k != 0);
}
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
vacc6x01234567 = vmaxq_f16(vacc6x01234567, vmin);
vacc7x01234567 = vmaxq_f16(vacc7x01234567, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
vacc6x01234567 = vminq_f16(vacc6x01234567, vmax);
vacc7x01234567 = vminq_f16(vacc7x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
vst1q_u16(c4, vreinterpretq_u16_f16(vacc4x01234567));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
vst1q_u16(c5, vreinterpretq_u16_f16(vacc5x01234567));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
vst1q_u16(c6, vreinterpretq_u16_f16(vacc6x01234567));
c6 = (uint16_t*) ((uintptr_t) c6 + cn_stride);
vst1q_u16(c7, vreinterpretq_u16_f16(vacc7x01234567));
c7 = (uint16_t*) ((uintptr_t) c7 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
a5 = (const uint16_t*) ((uintptr_t) a5 - kc);
a6 = (const uint16_t*) ((uintptr_t) a6 - kc);
a7 = (const uint16_t*) ((uintptr_t) a7 - kc);
nc -= 8;
} else {
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
float16x4_t vacc6x0123 = vget_low_f16(vacc6x01234567);
float16x4_t vacc7x0123 = vget_low_f16(vacc7x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vst1_u16(c4, vreinterpret_u16_f16(vacc4x0123)); c4 += 4;
vst1_u16(c5, vreinterpret_u16_f16(vacc5x0123)); c5 += 4;
vst1_u16(c6, vreinterpret_u16_f16(vacc6x0123)); c6 += 4;
vst1_u16(c7, vreinterpret_u16_f16(vacc7x0123)); c7 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc3x0123 = vget_high_f16(vacc3x01234567);
vacc4x0123 = vget_high_f16(vacc4x01234567);
vacc5x0123 = vget_high_f16(vacc5x01234567);
vacc6x0123 = vget_high_f16(vacc6x01234567);
vacc7x0123 = vget_high_f16(vacc7x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
vst1_lane_u32((void*) c6, vreinterpret_u32_f16(vacc6x0123), 0); c6 += 2;
vst1_lane_u32((void*) c7, vreinterpret_u32_f16(vacc7x0123), 0); c7 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
vacc6x0123 = vext_f16(vacc6x0123, vacc6x0123, 2);
vacc7x0123 = vext_f16(vacc7x0123, vacc7x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
vst1_lane_u16(c4, vreinterpret_u16_f16(vacc4x0123), 0);
vst1_lane_u16(c5, vreinterpret_u16_f16(vacc5x0123), 0);
vst1_lane_u16(c6, vreinterpret_u16_f16(vacc6x0123), 0);
vst1_lane_u16(c7, vreinterpret_u16_f16(vacc7x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 17,036 | 50.008982 | 132 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-ibilinear-chw/gen/f16-ibilinear-chw-neonfp16arith-p16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-ibilinear-chw/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/ibilinear.h>
void xnn_f16_ibilinear_chw_ukernel__neonfp16arith_p16(
size_t output_pixels,
size_t channels,
const void** restrict input,
size_t input_offset,
const void* restrict weights,
void* restrict output,
size_t input_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(uint16_t) == 0);
uint16_t* o = (uint16_t*) output;
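  // The outer loop walks channels; the pixel loops below consume two indirection
  // pointers (top row, bottom row) and one (alpha_h, alpha_v) weight pair per
  // output pixel. input_offset advances by input_increment per channel to move
  // the same indirection entries to the next input plane.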
do {
    const uint16_t** i = (const uint16_t**) input;
const uint16_t* w = weights;
size_t p = output_pixels;
for (; p >= 16; p -= 16) {
const uint16_t* itl0 = (const uint16_t*) ((uintptr_t) i[0] + input_offset);
const uint16_t* ibl0 = (const uint16_t*) ((uintptr_t) i[1] + input_offset);
const uint16_t* itl1 = (const uint16_t*) ((uintptr_t) i[2] + input_offset);
const uint16_t* ibl1 = (const uint16_t*) ((uintptr_t) i[3] + input_offset);
const uint16_t* itl2 = (const uint16_t*) ((uintptr_t) i[4] + input_offset);
const uint16_t* ibl2 = (const uint16_t*) ((uintptr_t) i[5] + input_offset);
const uint16_t* itl3 = (const uint16_t*) ((uintptr_t) i[6] + input_offset);
const uint16_t* ibl3 = (const uint16_t*) ((uintptr_t) i[7] + input_offset);
const uint16_t* itl4 = (const uint16_t*) ((uintptr_t) i[8] + input_offset);
const uint16_t* ibl4 = (const uint16_t*) ((uintptr_t) i[9] + input_offset);
const uint16_t* itl5 = (const uint16_t*) ((uintptr_t) i[10] + input_offset);
const uint16_t* ibl5 = (const uint16_t*) ((uintptr_t) i[11] + input_offset);
const uint16_t* itl6 = (const uint16_t*) ((uintptr_t) i[12] + input_offset);
const uint16_t* ibl6 = (const uint16_t*) ((uintptr_t) i[13] + input_offset);
const uint16_t* itl7 = (const uint16_t*) ((uintptr_t) i[14] + input_offset);
const uint16_t* ibl7 = (const uint16_t*) ((uintptr_t) i[15] + input_offset);
const uint16_t* itl8 = (const uint16_t*) ((uintptr_t) i[16] + input_offset);
const uint16_t* ibl8 = (const uint16_t*) ((uintptr_t) i[17] + input_offset);
const uint16_t* itl9 = (const uint16_t*) ((uintptr_t) i[18] + input_offset);
const uint16_t* ibl9 = (const uint16_t*) ((uintptr_t) i[19] + input_offset);
const uint16_t* itlA = (const uint16_t*) ((uintptr_t) i[20] + input_offset);
const uint16_t* iblA = (const uint16_t*) ((uintptr_t) i[21] + input_offset);
const uint16_t* itlB = (const uint16_t*) ((uintptr_t) i[22] + input_offset);
const uint16_t* iblB = (const uint16_t*) ((uintptr_t) i[23] + input_offset);
const uint16_t* itlC = (const uint16_t*) ((uintptr_t) i[24] + input_offset);
const uint16_t* iblC = (const uint16_t*) ((uintptr_t) i[25] + input_offset);
const uint16_t* itlD = (const uint16_t*) ((uintptr_t) i[26] + input_offset);
const uint16_t* iblD = (const uint16_t*) ((uintptr_t) i[27] + input_offset);
const uint16_t* itlE = (const uint16_t*) ((uintptr_t) i[28] + input_offset);
const uint16_t* iblE = (const uint16_t*) ((uintptr_t) i[29] + input_offset);
const uint16_t* itlF = (const uint16_t*) ((uintptr_t) i[30] + input_offset);
const uint16_t* iblF = (const uint16_t*) ((uintptr_t) i[31] + input_offset);
i += 2 * 16;
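      // Each indirection pointer addresses an adjacent (left, right) pair of f16
      // taps, so one 32-bit lane load fetches both horizontal taps of a row;
      // vld2_u16 deinterleaves the packed (alpha_h, alpha_v) weights.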
const uint16x4x2_t vw0123 = vld2_u16(w); w += 8;
const uint16x4x2_t vw4567 = vld2_u16(w); w += 8;
const uint16x4x2_t vw89AB = vld2_u16(w); w += 8;
const uint16x4x2_t vwCDEF = vld2_u16(w); w += 8;
float16x8_t vtltr0123 = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) itl0));
float16x8_t vblbr0123 = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) ibl0));
float16x8_t vtltr4567 = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) itl4));
float16x8_t vblbr4567 = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) ibl4));
float16x8_t vtltr89AB = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) itl8));
float16x8_t vblbr89AB = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) ibl8));
float16x8_t vtltrCDEF = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) itlC));
float16x8_t vblbrCDEF = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) iblC));
vtltr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl1, vreinterpretq_u32_f16(vtltr0123), 1));
vblbr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl1, vreinterpretq_u32_f16(vblbr0123), 1));
vtltr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl5, vreinterpretq_u32_f16(vtltr4567), 1));
vblbr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl5, vreinterpretq_u32_f16(vblbr4567), 1));
vtltr89AB = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl9, vreinterpretq_u32_f16(vtltr89AB), 1));
vblbr89AB = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl9, vreinterpretq_u32_f16(vblbr89AB), 1));
vtltrCDEF = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itlD, vreinterpretq_u32_f16(vtltrCDEF), 1));
vblbrCDEF = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) iblD, vreinterpretq_u32_f16(vblbrCDEF), 1));
vtltr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl2, vreinterpretq_u32_f16(vtltr0123), 2));
vblbr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl2, vreinterpretq_u32_f16(vblbr0123), 2));
vtltr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl6, vreinterpretq_u32_f16(vtltr4567), 2));
vblbr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl6, vreinterpretq_u32_f16(vblbr4567), 2));
vtltr89AB = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itlA, vreinterpretq_u32_f16(vtltr89AB), 2));
vblbr89AB = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) iblA, vreinterpretq_u32_f16(vblbr89AB), 2));
vtltrCDEF = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itlE, vreinterpretq_u32_f16(vtltrCDEF), 2));
vblbrCDEF = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) iblE, vreinterpretq_u32_f16(vblbrCDEF), 2));
vtltr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl3, vreinterpretq_u32_f16(vtltr0123), 3));
vblbr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl3, vreinterpretq_u32_f16(vblbr0123), 3));
vtltr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl7, vreinterpretq_u32_f16(vtltr4567), 3));
vblbr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl7, vreinterpretq_u32_f16(vblbr4567), 3));
vtltr89AB = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itlB, vreinterpretq_u32_f16(vtltr89AB), 3));
vblbr89AB = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) iblB, vreinterpretq_u32_f16(vblbr89AB), 3));
vtltrCDEF = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itlF, vreinterpretq_u32_f16(vtltrCDEF), 3));
vblbrCDEF = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) iblF, vreinterpretq_u32_f16(vblbrCDEF), 3));
const float16x8_t valphah01234567 = vreinterpretq_f16_u16(vcombine_u16(vw0123.val[0], vw4567.val[0]));
const float16x8_t valphav01234567 = vreinterpretq_f16_u16(vcombine_u16(vw0123.val[1], vw4567.val[1]));
const float16x8_t valphah89ABCDEF = vreinterpretq_f16_u16(vcombine_u16(vw89AB.val[0], vwCDEF.val[0]));
const float16x8_t valphav89ABCDEF = vreinterpretq_f16_u16(vcombine_u16(vw89AB.val[1], vwCDEF.val[1]));
const float16x8_t vldrd0123 = vsubq_f16(vblbr0123, vtltr0123);
const float16x8_t vldrd4567 = vsubq_f16(vblbr4567, vtltr4567);
const float16x8_t vldrd89AB = vsubq_f16(vblbr89AB, vtltr89AB);
const float16x8_t vldrdCDEF = vsubq_f16(vblbrCDEF, vtltrCDEF);
const float16x8x2_t vld_t01234567 = vuzpq_f16(vldrd0123, vldrd4567);
const float16x8_t vld01234567 = vld_t01234567.val[0];
const float16x8_t vrd01234567 = vld_t01234567.val[1];
const float16x8x2_t vld_t89ABCDEF = vuzpq_f16(vldrd89AB, vldrdCDEF);
const float16x8_t vld89ABCDEF = vld_t89ABCDEF.val[0];
const float16x8_t vrd89ABCDEF = vld_t89ABCDEF.val[1];
const float16x8x2_t vtl_t01234567 = vuzpq_f16(vtltr0123, vtltr4567);
const float16x8_t vtl01234567 = vtl_t01234567.val[0];
const float16x8_t vtr01234567 = vtl_t01234567.val[1];
const float16x8x2_t vtl_t89ABCDEF = vuzpq_f16(vtltr89AB, vtltrCDEF);
const float16x8_t vtl89ABCDEF = vtl_t89ABCDEF.val[0];
const float16x8_t vtr89ABCDEF = vtl_t89ABCDEF.val[1];
const float16x8_t vl01234567 = vfmaq_f16(vtl01234567, vld01234567, valphav01234567);
const float16x8_t vr01234567 = vfmaq_f16(vtr01234567, vrd01234567, valphav01234567);
const float16x8_t vl89ABCDEF = vfmaq_f16(vtl89ABCDEF, vld89ABCDEF, valphav89ABCDEF);
const float16x8_t vr89ABCDEF = vfmaq_f16(vtr89ABCDEF, vrd89ABCDEF, valphav89ABCDEF);
const float16x8_t vd01234567 = vsubq_f16(vr01234567, vl01234567);
const float16x8_t vd89ABCDEF = vsubq_f16(vr89ABCDEF, vl89ABCDEF);
const float16x8_t vo01234567 = vfmaq_f16(vl01234567, vd01234567, valphah01234567);
const float16x8_t vo89ABCDEF = vfmaq_f16(vl89ABCDEF, vd89ABCDEF, valphah89ABCDEF);
vst1q_u16(o, vreinterpretq_u16_f16(vo01234567)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vo89ABCDEF)); o += 8;
}
for (; p >= 4; p -= 4) {
const uint16_t* itl0 = (const uint16_t*) ((uintptr_t) i[0] + input_offset);
const uint16_t* ibl0 = (const uint16_t*) ((uintptr_t) i[1] + input_offset);
const uint16_t* itl1 = (const uint16_t*) ((uintptr_t) i[2] + input_offset);
const uint16_t* ibl1 = (const uint16_t*) ((uintptr_t) i[3] + input_offset);
const uint16_t* itl2 = (const uint16_t*) ((uintptr_t) i[4] + input_offset);
const uint16_t* ibl2 = (const uint16_t*) ((uintptr_t) i[5] + input_offset);
const uint16_t* itl3 = (const uint16_t*) ((uintptr_t) i[6] + input_offset);
const uint16_t* ibl3 = (const uint16_t*) ((uintptr_t) i[7] + input_offset);
i += 8;
const uint16x4x2_t vw = vld2_u16(w); w += 8;
float16x8_t vtltr = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) itl0));
float16x8_t vblbr = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) ibl0));
vtltr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl1, vreinterpretq_u32_f16(vtltr), 1));
vblbr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl1, vreinterpretq_u32_f16(vblbr), 1));
vtltr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl2, vreinterpretq_u32_f16(vtltr), 2));
vblbr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl2, vreinterpretq_u32_f16(vblbr), 2));
vtltr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl3, vreinterpretq_u32_f16(vtltr), 3));
vblbr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl3, vreinterpretq_u32_f16(vblbr), 3));
const float16x4_t valphah = vreinterpret_f16_u16(vw.val[0]);
const float16x4_t valphav = vreinterpret_f16_u16(vw.val[1]);
const float16x8_t vldrd = vsubq_f16(vblbr, vtltr);
const float16x4x2_t vld_t = vuzp_f16(vget_low_f16(vldrd), vget_high_f16(vldrd));
const float16x4_t vld = vld_t.val[0];
const float16x4_t vrd = vld_t.val[1];
const float16x4x2_t vtl_t = vuzp_f16(vget_low_f16(vtltr), vget_high_f16(vtltr));
const float16x4_t vtl = vtl_t.val[0];
const float16x4_t vtr = vtl_t.val[1];
const float16x4_t vl = vfma_f16(vtl, vld, valphav);
const float16x4_t vr = vfma_f16(vtr, vrd, valphav);
const float16x4_t vd = vsub_f16(vr, vl);
const float16x4_t vo = vfma_f16(vl, vd, valphah);
vst1_u16(o, vreinterpret_u16_f16(vo)); o += 4;
}
if XNN_UNLIKELY(p != 0) {
if (p & 2) {
const uint16_t* itl0 = (const uint16_t*) ((uintptr_t) i[0] + input_offset);
const uint16_t* ibl0 = (const uint16_t*) ((uintptr_t) i[1] + input_offset);
const uint16_t* itl1 = (const uint16_t*) ((uintptr_t) i[2] + input_offset);
const uint16_t* ibl1 = (const uint16_t*) ((uintptr_t) i[3] + input_offset);
i += 4;
const float16x4_t vw = vreinterpret_f16_u16(vld1_u16(w)); w += 4;
const float16x4x2_t vwhv = vuzp_f16(vw, vw);
const float16x4_t valphah = vwhv.val[0];
const float16x4_t valphav = vwhv.val[1];
float16x4_t vtltr = vreinterpret_f16_u32(vld1_dup_u32((const void*) itl0));
float16x4_t vblbr = vreinterpret_f16_u32(vld1_dup_u32((const void*) ibl0));
vtltr = vreinterpret_f16_u32(vld1_lane_u32((const void*) itl1, vreinterpret_u32_f16(vtltr), 1));
vblbr = vreinterpret_f16_u32(vld1_lane_u32((const void*) ibl1, vreinterpret_u32_f16(vblbr), 1));
const float16x4_t vldrd = vsub_f16(vblbr, vtltr);
const float16x4x2_t vld_t = vuzp_f16(vldrd, vldrd);
const float16x4_t vld = vld_t.val[0];
const float16x4_t vrd = vld_t.val[1];
const float16x4x2_t vtl_t = vuzp_f16(vtltr, vtltr);
const float16x4_t vtl = vtl_t.val[0];
const float16x4_t vtr = vtl_t.val[1];
const float16x4_t vl = vfma_f16(vtl, vld, valphav);
const float16x4_t vr = vfma_f16(vtr, vrd, valphav);
const float16x4_t vd = vsub_f16(vr, vl);
const float16x4_t vo = vfma_f16(vl, vd, valphah);
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vo), 0); o += 2;
}
if (p & 1) {
// We are computing the following formula:
// result = (1 - alpha_h) * (1 - alpha_v) * top_left +
// alpha_h * (1 - alpha_v) * top_right +
// (1 - alpha_h) * alpha_v * bottom_left +
// alpha_h * alpha_v * bottom_right.
//
// Rearranging gives
// result = left + alpha_h * (right - left),
// where
// left = top_left + alpha_v * (bottom_left - top_left),
// right = top_right + alpha_v * (bottom_right - top_right).
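          // Equivalent scalar sketch of the rearranged form (illustrative only;
          // tl/tr/bl/br denote the four taps loaded below):
          //   left  = tl + alpha_v * (bl - tl);
          //   right = tr + alpha_v * (br - tr);
          //   out   = left + alpha_h * (right - left);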
const uint16_t* itl = (const uint16_t*) ((uintptr_t) i[0] + input_offset);
const uint16_t* ibl = (const uint16_t*) ((uintptr_t) i[1] + input_offset);
i += 2;
const float16x4_t vw = vreinterpret_f16_u32(vld1_dup_u32((const void*) w)); w += 2;
const float16x4x2_t vwhv = vuzp_f16(vw, vw);
const float16x4_t valphah = vwhv.val[0];
const float16x4_t valphav = vwhv.val[1];
const float16x4_t vtltr = vreinterpret_f16_u32(vld1_dup_u32((const void*) itl));
const float16x4_t vblbr = vreinterpret_f16_u32(vld1_dup_u32((const void*) ibl));
const float16x4_t vldrd = vsub_f16(vblbr, vtltr);
const float16x4x2_t vld_t = vuzp_f16(vldrd, vldrd);
const float16x4_t vld = vld_t.val[0];
const float16x4_t vrd = vld_t.val[1];
const float16x4x2_t vtl_t = vuzp_f16(vtltr, vtltr);
const float16x4_t vtl = vtl_t.val[0];
const float16x4_t vtr = vtl_t.val[1];
const float16x4_t vl = vfma_f16(vtl, vld, valphav);
const float16x4_t vr = vfma_f16(vtr, vrd, valphav);
const float16x4_t vd = vsub_f16(vr, vl);
const float16x4_t vo = vfma_f16(vl, vd, valphah);
vst1_lane_u16(o, vreinterpret_u16_f16(vo), 0); o += 1;
}
}
input_offset += input_increment;
} while (--channels != 0);
}
| 15,581 | 55.252708 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-ibilinear-chw/gen/f16-ibilinear-chw-neonfp16arith-p4.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-ibilinear-chw/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/ibilinear.h>
void xnn_f16_ibilinear_chw_ukernel__neonfp16arith_p4(
size_t output_pixels,
size_t channels,
const void** restrict input,
size_t input_offset,
const void* restrict weights,
void* restrict output,
size_t input_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(uint16_t) == 0);
uint16_t* o = (uint16_t*) output;
do {
    const uint16_t** i = (const uint16_t**) input;
const uint16_t* w = weights;
size_t p = output_pixels;
for (; p >= 4; p -= 4) {
const uint16_t* itl0 = (const uint16_t*) ((uintptr_t) i[0] + input_offset);
const uint16_t* ibl0 = (const uint16_t*) ((uintptr_t) i[1] + input_offset);
const uint16_t* itl1 = (const uint16_t*) ((uintptr_t) i[2] + input_offset);
const uint16_t* ibl1 = (const uint16_t*) ((uintptr_t) i[3] + input_offset);
const uint16_t* itl2 = (const uint16_t*) ((uintptr_t) i[4] + input_offset);
const uint16_t* ibl2 = (const uint16_t*) ((uintptr_t) i[5] + input_offset);
const uint16_t* itl3 = (const uint16_t*) ((uintptr_t) i[6] + input_offset);
const uint16_t* ibl3 = (const uint16_t*) ((uintptr_t) i[7] + input_offset);
i += 8;
const uint16x4x2_t vw = vld2_u16(w); w += 8;
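      // Each row pointer addresses two adjacent fp16 texels (left, right). Loading
      // them as a single 32-bit lane packs the four pixels' (top-left, top-right)
      // and (bottom-left, bottom-right) pairs into one q-register each.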
float16x8_t vtltr = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) itl0));
float16x8_t vblbr = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) ibl0));
vtltr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl1, vreinterpretq_u32_f16(vtltr), 1));
vblbr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl1, vreinterpretq_u32_f16(vblbr), 1));
vtltr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl2, vreinterpretq_u32_f16(vtltr), 2));
vblbr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl2, vreinterpretq_u32_f16(vblbr), 2));
vtltr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl3, vreinterpretq_u32_f16(vtltr), 3));
vblbr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl3, vreinterpretq_u32_f16(vblbr), 3));
const float16x4_t valphah = vreinterpret_f16_u16(vw.val[0]);
const float16x4_t valphav = vreinterpret_f16_u16(vw.val[1]);
const float16x8_t vldrd = vsubq_f16(vblbr, vtltr);
const float16x4x2_t vld_t = vuzp_f16(vget_low_f16(vldrd), vget_high_f16(vldrd));
const float16x4_t vld = vld_t.val[0];
const float16x4_t vrd = vld_t.val[1];
const float16x4x2_t vtl_t = vuzp_f16(vget_low_f16(vtltr), vget_high_f16(vtltr));
const float16x4_t vtl = vtl_t.val[0];
const float16x4_t vtr = vtl_t.val[1];
const float16x4_t vl = vfma_f16(vtl, vld, valphav);
const float16x4_t vr = vfma_f16(vtr, vrd, valphav);
const float16x4_t vd = vsub_f16(vr, vl);
const float16x4_t vo = vfma_f16(vl, vd, valphah);
vst1_u16(o, vreinterpret_u16_f16(vo)); o += 4;
}
if XNN_UNLIKELY(p != 0) {
if (p & 2) {
const uint16_t* itl0 = (const uint16_t*) ((uintptr_t) i[0] + input_offset);
const uint16_t* ibl0 = (const uint16_t*) ((uintptr_t) i[1] + input_offset);
const uint16_t* itl1 = (const uint16_t*) ((uintptr_t) i[2] + input_offset);
const uint16_t* ibl1 = (const uint16_t*) ((uintptr_t) i[3] + input_offset);
i += 4;
const float16x4_t vw = vreinterpret_f16_u16(vld1_u16(w)); w += 4;
const float16x4x2_t vwhv = vuzp_f16(vw, vw);
const float16x4_t valphah = vwhv.val[0];
const float16x4_t valphav = vwhv.val[1];
float16x4_t vtltr = vreinterpret_f16_u32(vld1_dup_u32((const void*) itl0));
float16x4_t vblbr = vreinterpret_f16_u32(vld1_dup_u32((const void*) ibl0));
vtltr = vreinterpret_f16_u32(vld1_lane_u32((const void*) itl1, vreinterpret_u32_f16(vtltr), 1));
vblbr = vreinterpret_f16_u32(vld1_lane_u32((const void*) ibl1, vreinterpret_u32_f16(vblbr), 1));
const float16x4_t vldrd = vsub_f16(vblbr, vtltr);
const float16x4x2_t vld_t = vuzp_f16(vldrd, vldrd);
const float16x4_t vld = vld_t.val[0];
const float16x4_t vrd = vld_t.val[1];
const float16x4x2_t vtl_t = vuzp_f16(vtltr, vtltr);
const float16x4_t vtl = vtl_t.val[0];
const float16x4_t vtr = vtl_t.val[1];
const float16x4_t vl = vfma_f16(vtl, vld, valphav);
const float16x4_t vr = vfma_f16(vtr, vrd, valphav);
const float16x4_t vd = vsub_f16(vr, vl);
const float16x4_t vo = vfma_f16(vl, vd, valphah);
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vo), 0); o += 2;
}
if (p & 1) {
// We are computing the following formula:
// result = (1 - alpha_h) * (1 - alpha_v) * top_left +
// alpha_h * (1 - alpha_v) * top_right +
// (1 - alpha_h) * alpha_v * bottom_left +
// alpha_h * alpha_v * bottom_right.
//
// Rearranging gives
// result = left + alpha_h * (right - left),
// where
// left = top_left + alpha_v * (bottom_left - top_left),
// right = top_right + alpha_v * (bottom_right - top_right).
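        // Expanding the rearranged form recovers the four-term formula:
        //   left + alpha_h * (right - left)
        //     = (1 - alpha_h) * ((1 - alpha_v) * top_left  + alpha_v * bottom_left)
        //     + alpha_h       * ((1 - alpha_v) * top_right + alpha_v * bottom_right),
        // so left, right, and the result each take a single fused multiply-add below.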
const uint16_t* itl = (const uint16_t*) ((uintptr_t) i[0] + input_offset);
const uint16_t* ibl = (const uint16_t*) ((uintptr_t) i[1] + input_offset);
i += 2;
const float16x4_t vw = vreinterpret_f16_u32(vld1_dup_u32((const void*) w)); w += 2;
const float16x4x2_t vwhv = vuzp_f16(vw, vw);
const float16x4_t valphah = vwhv.val[0];
const float16x4_t valphav = vwhv.val[1];
const float16x4_t vtltr = vreinterpret_f16_u32(vld1_dup_u32((const void*) itl));
const float16x4_t vblbr = vreinterpret_f16_u32(vld1_dup_u32((const void*) ibl));
const float16x4_t vldrd = vsub_f16(vblbr, vtltr);
const float16x4x2_t vld_t = vuzp_f16(vldrd, vldrd);
const float16x4_t vld = vld_t.val[0];
const float16x4_t vrd = vld_t.val[1];
const float16x4x2_t vtl_t = vuzp_f16(vtltr, vtltr);
const float16x4_t vtl = vtl_t.val[0];
const float16x4_t vtr = vtl_t.val[1];
const float16x4_t vl = vfma_f16(vtl, vld, valphav);
const float16x4_t vr = vfma_f16(vtr, vrd, valphav);
const float16x4_t vd = vsub_f16(vr, vl);
const float16x4_t vo = vfma_f16(vl, vd, valphah);
vst1_lane_u16(o, vreinterpret_u16_f16(vo), 0); o += 1;
}
}
input_offset += input_increment;
} while (--channels != 0);
}
| 6,904 | 40.596386 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-ibilinear-chw/gen/f16-ibilinear-chw-neonfp16arith-p8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-ibilinear-chw/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/ibilinear.h>
void xnn_f16_ibilinear_chw_ukernel__neonfp16arith_p8(
size_t output_pixels,
size_t channels,
const void** restrict input,
size_t input_offset,
const void* restrict weights,
void* restrict output,
size_t input_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(input_increment % sizeof(uint16_t) == 0);
uint16_t* o = (uint16_t*) output;
do {
const uint16_t** i = (const uint16_t**)input;
const uint16_t* w = weights;
size_t p = output_pixels;
for (; p >= 8; p -= 8) {
const uint16_t* itl0 = (const uint16_t*) ((uintptr_t) i[0] + input_offset);
const uint16_t* ibl0 = (const uint16_t*) ((uintptr_t) i[1] + input_offset);
const uint16_t* itl1 = (const uint16_t*) ((uintptr_t) i[2] + input_offset);
const uint16_t* ibl1 = (const uint16_t*) ((uintptr_t) i[3] + input_offset);
const uint16_t* itl2 = (const uint16_t*) ((uintptr_t) i[4] + input_offset);
const uint16_t* ibl2 = (const uint16_t*) ((uintptr_t) i[5] + input_offset);
const uint16_t* itl3 = (const uint16_t*) ((uintptr_t) i[6] + input_offset);
const uint16_t* ibl3 = (const uint16_t*) ((uintptr_t) i[7] + input_offset);
const uint16_t* itl4 = (const uint16_t*) ((uintptr_t) i[8] + input_offset);
const uint16_t* ibl4 = (const uint16_t*) ((uintptr_t) i[9] + input_offset);
const uint16_t* itl5 = (const uint16_t*) ((uintptr_t) i[10] + input_offset);
const uint16_t* ibl5 = (const uint16_t*) ((uintptr_t) i[11] + input_offset);
const uint16_t* itl6 = (const uint16_t*) ((uintptr_t) i[12] + input_offset);
const uint16_t* ibl6 = (const uint16_t*) ((uintptr_t) i[13] + input_offset);
const uint16_t* itl7 = (const uint16_t*) ((uintptr_t) i[14] + input_offset);
const uint16_t* ibl7 = (const uint16_t*) ((uintptr_t) i[15] + input_offset);
i += 2 * 8;
const uint16x4x2_t vw0123 = vld2_u16(w); w += 8;
const uint16x4x2_t vw4567 = vld2_u16(w); w += 8;
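      // Each row pointer addresses two adjacent fp16 texels. Loading them as one
      // 32-bit lane packs the (top-left, top-right) and (bottom-left, bottom-right)
      // pairs of pixels 0-3 and 4-7 into one q-register each.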
float16x8_t vtltr0123 = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) itl0));
float16x8_t vblbr0123 = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) ibl0));
float16x8_t vtltr4567 = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) itl4));
float16x8_t vblbr4567 = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) ibl4));
vtltr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl1, vreinterpretq_u32_f16(vtltr0123), 1));
vblbr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl1, vreinterpretq_u32_f16(vblbr0123), 1));
vtltr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl5, vreinterpretq_u32_f16(vtltr4567), 1));
vblbr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl5, vreinterpretq_u32_f16(vblbr4567), 1));
vtltr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl2, vreinterpretq_u32_f16(vtltr0123), 2));
vblbr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl2, vreinterpretq_u32_f16(vblbr0123), 2));
vtltr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl6, vreinterpretq_u32_f16(vtltr4567), 2));
vblbr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl6, vreinterpretq_u32_f16(vblbr4567), 2));
vtltr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl3, vreinterpretq_u32_f16(vtltr0123), 3));
vblbr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl3, vreinterpretq_u32_f16(vblbr0123), 3));
vtltr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl7, vreinterpretq_u32_f16(vtltr4567), 3));
vblbr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl7, vreinterpretq_u32_f16(vblbr4567), 3));
const float16x8_t valphah01234567 = vreinterpretq_f16_u16(vcombine_u16(vw0123.val[0], vw4567.val[0]));
const float16x8_t valphav01234567 = vreinterpretq_f16_u16(vcombine_u16(vw0123.val[1], vw4567.val[1]));
const float16x8_t vldrd0123 = vsubq_f16(vblbr0123, vtltr0123);
const float16x8_t vldrd4567 = vsubq_f16(vblbr4567, vtltr4567);
const float16x8x2_t vld_t01234567 = vuzpq_f16(vldrd0123, vldrd4567);
const float16x8_t vld01234567 = vld_t01234567.val[0];
const float16x8_t vrd01234567 = vld_t01234567.val[1];
const float16x8x2_t vtl_t01234567 = vuzpq_f16(vtltr0123, vtltr4567);
const float16x8_t vtl01234567 = vtl_t01234567.val[0];
const float16x8_t vtr01234567 = vtl_t01234567.val[1];
const float16x8_t vl01234567 = vfmaq_f16(vtl01234567, vld01234567, valphav01234567);
const float16x8_t vr01234567 = vfmaq_f16(vtr01234567, vrd01234567, valphav01234567);
const float16x8_t vd01234567 = vsubq_f16(vr01234567, vl01234567);
const float16x8_t vo01234567 = vfmaq_f16(vl01234567, vd01234567, valphah01234567);
vst1q_u16(o, vreinterpretq_u16_f16(vo01234567)); o += 8;
}
for (; p >= 4; p -= 4) {
const uint16_t* itl0 = (const uint16_t*) ((uintptr_t) i[0] + input_offset);
const uint16_t* ibl0 = (const uint16_t*) ((uintptr_t) i[1] + input_offset);
const uint16_t* itl1 = (const uint16_t*) ((uintptr_t) i[2] + input_offset);
const uint16_t* ibl1 = (const uint16_t*) ((uintptr_t) i[3] + input_offset);
const uint16_t* itl2 = (const uint16_t*) ((uintptr_t) i[4] + input_offset);
const uint16_t* ibl2 = (const uint16_t*) ((uintptr_t) i[5] + input_offset);
const uint16_t* itl3 = (const uint16_t*) ((uintptr_t) i[6] + input_offset);
const uint16_t* ibl3 = (const uint16_t*) ((uintptr_t) i[7] + input_offset);
i += 8;
const uint16x4x2_t vw = vld2_u16(w); w += 8;
float16x8_t vtltr = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) itl0));
float16x8_t vblbr = vreinterpretq_f16_u32(vld1q_dup_u32((const void*) ibl0));
vtltr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl1, vreinterpretq_u32_f16(vtltr), 1));
vblbr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl1, vreinterpretq_u32_f16(vblbr), 1));
vtltr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl2, vreinterpretq_u32_f16(vtltr), 2));
vblbr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl2, vreinterpretq_u32_f16(vblbr), 2));
vtltr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl3, vreinterpretq_u32_f16(vtltr), 3));
vblbr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl3, vreinterpretq_u32_f16(vblbr), 3));
const float16x4_t valphah = vreinterpret_f16_u16(vw.val[0]);
const float16x4_t valphav = vreinterpret_f16_u16(vw.val[1]);
const float16x8_t vldrd = vsubq_f16(vblbr, vtltr);
const float16x4x2_t vld_t = vuzp_f16(vget_low_f16(vldrd), vget_high_f16(vldrd));
const float16x4_t vld = vld_t.val[0];
const float16x4_t vrd = vld_t.val[1];
const float16x4x2_t vtl_t = vuzp_f16(vget_low_f16(vtltr), vget_high_f16(vtltr));
const float16x4_t vtl = vtl_t.val[0];
const float16x4_t vtr = vtl_t.val[1];
const float16x4_t vl = vfma_f16(vtl, vld, valphav);
const float16x4_t vr = vfma_f16(vtr, vrd, valphav);
const float16x4_t vd = vsub_f16(vr, vl);
const float16x4_t vo = vfma_f16(vl, vd, valphah);
vst1_u16(o, vreinterpret_u16_f16(vo)); o += 4;
}
if XNN_UNLIKELY(p != 0) {
if (p & 2) {
const uint16_t* itl0 = (const uint16_t*) ((uintptr_t) i[0] + input_offset);
const uint16_t* ibl0 = (const uint16_t*) ((uintptr_t) i[1] + input_offset);
const uint16_t* itl1 = (const uint16_t*) ((uintptr_t) i[2] + input_offset);
const uint16_t* ibl1 = (const uint16_t*) ((uintptr_t) i[3] + input_offset);
i += 4;
const float16x4_t vw = vreinterpret_f16_u16(vld1_u16(w)); w += 4;
const float16x4x2_t vwhv = vuzp_f16(vw, vw);
const float16x4_t valphah = vwhv.val[0];
const float16x4_t valphav = vwhv.val[1];
float16x4_t vtltr = vreinterpret_f16_u32(vld1_dup_u32((const void*) itl0));
float16x4_t vblbr = vreinterpret_f16_u32(vld1_dup_u32((const void*) ibl0));
vtltr = vreinterpret_f16_u32(vld1_lane_u32((const void*) itl1, vreinterpret_u32_f16(vtltr), 1));
vblbr = vreinterpret_f16_u32(vld1_lane_u32((const void*) ibl1, vreinterpret_u32_f16(vblbr), 1));
const float16x4_t vldrd = vsub_f16(vblbr, vtltr);
const float16x4x2_t vld_t = vuzp_f16(vldrd, vldrd);
const float16x4_t vld = vld_t.val[0];
const float16x4_t vrd = vld_t.val[1];
const float16x4x2_t vtl_t = vuzp_f16(vtltr, vtltr);
const float16x4_t vtl = vtl_t.val[0];
const float16x4_t vtr = vtl_t.val[1];
const float16x4_t vl = vfma_f16(vtl, vld, valphav);
const float16x4_t vr = vfma_f16(vtr, vrd, valphav);
const float16x4_t vd = vsub_f16(vr, vl);
const float16x4_t vo = vfma_f16(vl, vd, valphah);
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vo), 0); o += 2;
}
if (p & 1) {
// We are computing the following formula:
// result = (1 - alpha_h) * (1 - alpha_v) * top_left +
// alpha_h * (1 - alpha_v) * top_right +
// (1 - alpha_h) * alpha_v * bottom_left +
// alpha_h * alpha_v * bottom_right.
//
// Rearranging gives
// result = left + alpha_h * (right - left),
// where
// left = top_left + alpha_v * (bottom_left - top_left),
// right = top_right + alpha_v * (bottom_right - top_right).
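        // Expanding the rearranged form recovers the four-term formula:
        //   left + alpha_h * (right - left)
        //     = (1 - alpha_h) * ((1 - alpha_v) * top_left  + alpha_v * bottom_left)
        //     + alpha_h       * ((1 - alpha_v) * top_right + alpha_v * bottom_right),
        // so left, right, and the result each take a single fused multiply-add below.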
const uint16_t* itl = (const uint16_t*) ((uintptr_t) i[0] + input_offset);
const uint16_t* ibl = (const uint16_t*) ((uintptr_t) i[1] + input_offset);
i += 2;
const float16x4_t vw = vreinterpret_f16_u32(vld1_dup_u32((const void*) w)); w += 2;
const float16x4x2_t vwhv = vuzp_f16(vw, vw);
const float16x4_t valphah = vwhv.val[0];
const float16x4_t valphav = vwhv.val[1];
const float16x4_t vtltr = vreinterpret_f16_u32(vld1_dup_u32((const void*) itl));
const float16x4_t vblbr = vreinterpret_f16_u32(vld1_dup_u32((const void*) ibl));
const float16x4_t vldrd = vsub_f16(vblbr, vtltr);
const float16x4x2_t vld_t = vuzp_f16(vldrd, vldrd);
const float16x4_t vld = vld_t.val[0];
const float16x4_t vrd = vld_t.val[1];
const float16x4x2_t vtl_t = vuzp_f16(vtltr, vtltr);
const float16x4_t vtl = vtl_t.val[0];
const float16x4_t vtr = vtl_t.val[1];
const float16x4_t vl = vfma_f16(vtl, vld, valphav);
const float16x4_t vr = vfma_f16(vtr, vrd, valphav);
const float16x4_t vd = vsub_f16(vr, vl);
const float16x4_t vo = vfma_f16(vl, vd, valphah);
vst1_lane_u16(o, vreinterpret_u16_f16(vo), 0); o += 1;
}
}
input_offset += input_increment;
} while (--channels != 0);
}
| 11,267 | 48.421053 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-ibilinear/gen/f16-ibilinear-fma3-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-ibilinear/fma3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_ibilinear_ukernel__fma3_c8(
size_t output_pixels,
size_t channels,
const void** restrict input,
size_t input_offset,
const void* restrict weights,
void* restrict output,
size_t output_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(channels % sizeof(uint16_t) == 0);
uint16_t* o = (uint16_t*) output;
do {
const uint16_t* i0 = (const uint16_t*) ((uintptr_t) input[0] + input_offset);
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) input[1] + input_offset);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) input[2] + input_offset);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) input[3] + input_offset);
input += 4;
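    // The pixel's two fp16 weights (alpha_h, alpha_v) share one 32-bit word:
    // broadcast it, widen to fp32, then de-interleave the horizontal and
    // vertical weights with in-lane permutes.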
const __m256 valphahv = _mm256_cvtph_ps(_mm_castps_si128(_mm_broadcast_ss(weights)));
const __m256 valphah = _mm256_permute_ps(valphahv, _MM_SHUFFLE(2, 0, 2, 0));
const __m256 valphav = _mm256_permute_ps(valphahv, _MM_SHUFFLE(3, 1, 3, 1));
weights = (const uint16_t*) weights + 2;
size_t c = channels;
for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
const __m256 vtl = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vtr = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vbl = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
i2 += 8;
const __m256 vbr = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
i3 += 8;
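      // Every intermediate is converted back to fp16 and re-expanded
      // (cvtps_ph/cvtph_ps round trip), keeping the arithmetic faithful to
      // half-precision rounding at each step.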
const __m256 vtd = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(vtr, vtl), _MM_FROUND_TO_NEAREST_INT));
const __m256 vbd = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(vbr, vbl), _MM_FROUND_TO_NEAREST_INT));
const __m256 vt = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vtd, valphah, vtl), _MM_FROUND_TO_NEAREST_INT));
const __m256 vb = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vbd, valphah, vbl), _MM_FROUND_TO_NEAREST_INT));
const __m256 vd = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(vb, vt), _MM_FROUND_TO_NEAREST_INT));
const __m128i vo = _mm256_cvtps_ph(_mm256_fmadd_ps(vd, valphav, vt), _MM_FROUND_TO_NEAREST_INT);
_mm_storeu_si128((__m128i*) o, vo);
o += 8;
}
if XNN_UNLIKELY(c != 0) {
const __m256 vtl = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
i0 += 8;
const __m256 vtr = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
i1 += 8;
const __m256 vbl = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
i2 += 8;
const __m256 vbr = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
i3 += 8;
const __m256 vtd = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(vtr, vtl), _MM_FROUND_TO_NEAREST_INT));
const __m256 vbd = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(vbr, vbl), _MM_FROUND_TO_NEAREST_INT));
const __m256 vt = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vtd, valphah, vtl), _MM_FROUND_TO_NEAREST_INT));
const __m256 vb = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vbd, valphah, vbl), _MM_FROUND_TO_NEAREST_INT));
const __m256 vd = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_sub_ps(vb, vt), _MM_FROUND_TO_NEAREST_INT));
__m128i vo = _mm256_cvtps_ph(_mm256_fmadd_ps(vd, valphav, vt), _MM_FROUND_TO_NEAREST_INT);
if (c & (4 * sizeof(uint16_t))) {
_mm_storel_epi64((__m128i*) o, vo);
vo = _mm_unpackhi_epi64(vo, vo);
o += 4;
}
if (c & (2 * sizeof(uint16_t))) {
_mm_storeu_si32(o, vo);
vo = _mm_srli_epi64(vo, 32);
o += 2;
}
if (c & (1 * sizeof(uint16_t))) {
*o = (uint16_t) _mm_extract_epi16(vo, 0);
o += 1;
}
}
o = (uint16_t*) ((uintptr_t) o + output_increment);
} while (--output_pixels != 0);
}
| 4,253 | 38.757009 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-ibilinear/gen/f16-ibilinear-neonfp16arith-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-ibilinear/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
void xnn_f16_ibilinear_ukernel__neonfp16arith_c16(
size_t output_pixels,
size_t channels,
const void** restrict input,
size_t input_offset,
const void* restrict weights,
void* restrict output,
size_t output_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(channels % sizeof(uint16_t) == 0);
uint16_t* o = (uint16_t*) output;
do {
const uint16_t* i0 = (const uint16_t*) ((uintptr_t) input[0] + input_offset);
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) input[1] + input_offset);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) input[2] + input_offset);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) input[3] + input_offset);
input += 4;
const float16x8_t valphah = vreinterpretq_f16_u16(vld1q_dup_u16(weights)); weights = (const uint16_t*) weights + 1;
const float16x8_t valphav = vreinterpretq_f16_u16(vld1q_dup_u16(weights)); weights = (const uint16_t*) weights + 1;
size_t c = channels;
for (; c >= 16 * sizeof(uint16_t); c -= 16 * sizeof(uint16_t)) {
const float16x8_t vtl456789AB = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vtr456789AB = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vbl456789AB = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
const float16x8_t vbr456789AB = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
const float16x8_t vtlCDEFGHIJ = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vtrCDEFGHIJ = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vblCDEFGHIJ = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
const float16x8_t vbrCDEFGHIJ = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
const float16x8_t vtd456789AB = vsubq_f16(vtr456789AB, vtl456789AB);
const float16x8_t vbd456789AB = vsubq_f16(vbr456789AB, vbl456789AB);
const float16x8_t vtdCDEFGHIJ = vsubq_f16(vtrCDEFGHIJ, vtlCDEFGHIJ);
const float16x8_t vbdCDEFGHIJ = vsubq_f16(vbrCDEFGHIJ, vblCDEFGHIJ);
const float16x8_t vt456789AB = vfmaq_f16(vtl456789AB, vtd456789AB, valphah);
const float16x8_t vb456789AB = vfmaq_f16(vbl456789AB, vbd456789AB, valphah);
const float16x8_t vtCDEFGHIJ = vfmaq_f16(vtlCDEFGHIJ, vtdCDEFGHIJ, valphah);
const float16x8_t vbCDEFGHIJ = vfmaq_f16(vblCDEFGHIJ, vbdCDEFGHIJ, valphah);
const float16x8_t vd456789AB = vsubq_f16(vb456789AB, vt456789AB);
const float16x8_t vdCDEFGHIJ = vsubq_f16(vbCDEFGHIJ, vtCDEFGHIJ);
const float16x8_t vo456789AB = vfmaq_f16(vt456789AB, vd456789AB, valphav);
const float16x8_t voCDEFGHIJ = vfmaq_f16(vtCDEFGHIJ, vdCDEFGHIJ, valphav);
vst1q_u16(o, vreinterpretq_u16_f16(vo456789AB)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(voCDEFGHIJ)); o += 8;
}
for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
const float16x8_t vtl = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vtr = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vbl = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
const float16x8_t vbr = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
const float16x8_t vtd = vsubq_f16(vtr, vtl);
const float16x8_t vbd = vsubq_f16(vbr, vbl);
const float16x8_t vt = vfmaq_f16(vtl, vtd, valphah);
const float16x8_t vb = vfmaq_f16(vbl, vbd, valphah);
const float16x8_t vd = vsubq_f16(vb, vt);
const float16x8_t vo = vfmaq_f16(vt, vd, valphav);
vst1q_u16(o, vreinterpretq_u16_f16(vo)); o += 8;
}
if XNN_UNLIKELY(c != 0) {
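      // Tail of 1-7 channels: full 8-element vectors are still loaded (the
      // kernel is declared XNN_OOB_READS), but only the remaining c elements
      // are stored below.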
const float16x8_t vtl = vreinterpretq_f16_u16(vld1q_u16(i0));
const float16x8_t vtr = vreinterpretq_f16_u16(vld1q_u16(i1));
const float16x8_t vbl = vreinterpretq_f16_u16(vld1q_u16(i2));
const float16x8_t vbr = vreinterpretq_f16_u16(vld1q_u16(i3));
const float16x8_t vtd = vsubq_f16(vtr, vtl);
const float16x8_t vbd = vsubq_f16(vbr, vbl);
const float16x8_t vt = vfmaq_f16(vtl, vtd, valphah);
const float16x8_t vb = vfmaq_f16(vbl, vbd, valphah);
const float16x8_t vd = vsubq_f16(vb, vt);
float16x8_t vo = vfmaq_f16(vt, vd, valphav);
float16x4_t vo_lo = vget_low_f16(vo);
if (c & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vo_lo)); o += 4;
vo_lo = vget_high_f16(vo);
}
if (c & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vo_lo), 0); o += 2;
vo_lo = vext_f16(vo_lo, vo_lo, 2);
}
if (c & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vo_lo), 0); o += 1;
}
}
o = (uint16_t*) ((uintptr_t) o + output_increment);
} while (--output_pixels != 0);
}
| 5,185 | 41.162602 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-ibilinear/gen/f16-ibilinear-neonfp16arith-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-ibilinear/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
void xnn_f16_ibilinear_ukernel__neonfp16arith_c8(
size_t output_pixels,
size_t channels,
const void** restrict input,
size_t input_offset,
const void* restrict weights,
void* restrict output,
size_t output_increment) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(channels != 0);
assert(channels % sizeof(uint16_t) == 0);
uint16_t* o = (uint16_t*) output;
do {
const uint16_t* i0 = (const uint16_t*) ((uintptr_t) input[0] + input_offset);
const uint16_t* i1 = (const uint16_t*) ((uintptr_t) input[1] + input_offset);
const uint16_t* i2 = (const uint16_t*) ((uintptr_t) input[2] + input_offset);
const uint16_t* i3 = (const uint16_t*) ((uintptr_t) input[3] + input_offset);
input += 4;
const float16x8_t valphah = vreinterpretq_f16_u16(vld1q_dup_u16(weights)); weights = (const uint16_t*) weights + 1;
const float16x8_t valphav = vreinterpretq_f16_u16(vld1q_dup_u16(weights)); weights = (const uint16_t*) weights + 1;
size_t c = channels;
for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
const float16x8_t vtl = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
const float16x8_t vtr = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
const float16x8_t vbl = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
const float16x8_t vbr = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
const float16x8_t vtd = vsubq_f16(vtr, vtl);
const float16x8_t vbd = vsubq_f16(vbr, vbl);
const float16x8_t vt = vfmaq_f16(vtl, vtd, valphah);
const float16x8_t vb = vfmaq_f16(vbl, vbd, valphah);
const float16x8_t vd = vsubq_f16(vb, vt);
const float16x8_t vo = vfmaq_f16(vt, vd, valphav);
vst1q_u16(o, vreinterpretq_u16_f16(vo)); o += 8;
}
if XNN_UNLIKELY(c != 0) {
const float16x8_t vtl = vreinterpretq_f16_u16(vld1q_u16(i0));
const float16x8_t vtr = vreinterpretq_f16_u16(vld1q_u16(i1));
const float16x8_t vbl = vreinterpretq_f16_u16(vld1q_u16(i2));
const float16x8_t vbr = vreinterpretq_f16_u16(vld1q_u16(i3));
const float16x8_t vtd = vsubq_f16(vtr, vtl);
const float16x8_t vbd = vsubq_f16(vbr, vbl);
const float16x8_t vt = vfmaq_f16(vtl, vtd, valphah);
const float16x8_t vb = vfmaq_f16(vbl, vbd, valphah);
const float16x8_t vd = vsubq_f16(vb, vt);
float16x8_t vo = vfmaq_f16(vt, vd, valphav);
float16x4_t vo_lo = vget_low_f16(vo);
if (c & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vo_lo)); o += 4;
vo_lo = vget_high_f16(vo);
}
if (c & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vo_lo), 0); o += 2;
vo_lo = vext_f16(vo_lo, vo_lo, 2);
}
if (c & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vo_lo), 0); o += 1;
}
}
o = (uint16_t*) ((uintptr_t) o + output_increment);
} while (--output_pixels != 0);
}
| 3,360 | 34.755319 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-igemm/gen/f16-igemm-1x16-minmax-neonfp16arith-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_igemm_minmax_ukernel_1x16__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = (uint16_t*) c;
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
size_t p = ks;
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
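      // a[0] == zero marks a padding tap that reads from the shared zero
      // buffer; a_offset is only applied to real input rows.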
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
for (; k >= 4 * sizeof(uint16_t); k -= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
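        // AArch64 has a lane-indexed fp16 fused multiply-add; the AArch32
        // NEON-FP16 build uses vmlaq_lane_f16 instead.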
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
#endif
}
if XNN_UNLIKELY(k != 0) {
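        // Remainder of kc (1-3 fp16 elements): broadcast one activation value
        // per step and accumulate it against a full row of weights.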
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0, vb89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
}
p -= 1 * sizeof(void*);
} while (p != 0);
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
vst1q_u16(c0 + 8, vreinterpretq_u16_f16(vacc0x89ABCDEF));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
if (nc & 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567)); c0 += 8;
vacc0x01234567 = vacc0x89ABCDEF;
}
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,370 | 38.32716 | 134 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-igemm/gen/f16-igemm-1x8-minmax-neonfp16arith-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_igemm_minmax_ukernel_1x8__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = (uint16_t*) c;
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
size_t p = ks;
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
for (; k >= 4 * sizeof(uint16_t); k -= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
#endif
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
k -= sizeof(uint16_t);
} while (k != 0);
}
p -= 1 * sizeof(void*);
} while (p != 0);
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
if (nc & 4) {
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vacc0x0123 = vget_high_f16(vacc0x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,533 | 31.618705 | 134 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-igemm/gen/f16-igemm-4x16-minmax-neonfp16arith-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_igemm_minmax_ukernel_4x16__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = (uint16_t*) c;
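  // When mr < 4, the unused row pointers alias the previous row; the redundant
  // rows are stored first and then overwritten by the valid row, so no
  // out-of-bounds writes occur.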
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vacc0x01234567;
float16x8_t vacc1x89ABCDEF = vacc0x89ABCDEF;
float16x8_t vacc2x01234567 = vacc0x01234567;
float16x8_t vacc2x89ABCDEF = vacc0x89ABCDEF;
float16x8_t vacc3x01234567 = vacc0x01234567;
float16x8_t vacc3x89ABCDEF = vacc0x89ABCDEF;
size_t p = ks;
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
const uint16_t* restrict a1 = (const uint16_t*) a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint16_t*) ((uintptr_t) a1 + a_offset);
}
const uint16_t* restrict a2 = (const uint16_t*) a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint16_t*) ((uintptr_t) a2 + a_offset);
}
const uint16_t* restrict a3 = (const uint16_t*) a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint16_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 4 * sizeof(uint16_t); k -= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
#endif
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0, vb89ABCDEF);
vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1, vb89ABCDEF);
vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2, vb89ABCDEF);
vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3, vb89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = vmaxq_f16(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = vmaxq_f16(vacc2x89ABCDEF, vmin);
vacc3x89ABCDEF = vmaxq_f16(vacc3x89ABCDEF, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = vminq_f16(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = vminq_f16(vacc2x89ABCDEF, vmax);
vacc3x89ABCDEF = vminq_f16(vacc3x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
vst1q_u16(c3 + 8, vreinterpretq_u16_f16(vacc3x89ABCDEF));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
vst1q_u16(c2 + 8, vreinterpretq_u16_f16(vacc2x89ABCDEF));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
vst1q_u16(c1 + 8, vreinterpretq_u16_f16(vacc1x89ABCDEF));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
vst1q_u16(c0 + 8, vreinterpretq_u16_f16(vacc0x89ABCDEF));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
if (nc & 8) {
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567)); c3 += 8;
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567)); c2 += 8;
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567)); c1 += 8;
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567)); c0 += 8;
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
}
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
if (nc & 4) {
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vacc3x0123 = vget_high_f16(vacc3x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc0x0123 = vget_high_f16(vacc0x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,036 | 49.123333 | 134 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-igemm/gen/f16-igemm-4x8-minmax-neonfp16arith-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_igemm_minmax_ukernel_4x8__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = (uint16_t*) c;
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vacc0x01234567;
float16x8_t vacc2x01234567 = vacc0x01234567;
float16x8_t vacc3x01234567 = vacc0x01234567;
size_t p = ks;
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
const uint16_t* restrict a1 = (const uint16_t*) a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint16_t*) ((uintptr_t) a1 + a_offset);
}
const uint16_t* restrict a2 = (const uint16_t*) a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint16_t*) ((uintptr_t) a2 + a_offset);
}
const uint16_t* restrict a3 = (const uint16_t*) a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint16_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
for (; k >= 4 * sizeof(uint16_t); k -= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
#endif
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
k -= sizeof(uint16_t);
} while (k != 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
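    // Store the output tile: full 8-column rows when nc >= 8, otherwise 4/2/1-column tail stores.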
if XNN_LIKELY(nc >= 8) {
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
if (nc & 4) {
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vacc3x0123 = vget_high_f16(vacc3x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc0x0123 = vget_high_f16(vacc0x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,043 | 42.293103 | 134 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-igemm/gen/f16-igemm-6x16-minmax-neonfp16arith-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
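// f16 IGEMM microkernel: accumulates a 6-row x 16-column output tile from indirectly
// addressed A rows and packed weights, then applies a fused min/max clamp.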
void xnn_f16_igemm_minmax_ukernel_6x16__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = (uint16_t*) c;
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vacc0x01234567;
float16x8_t vacc1x89ABCDEF = vacc0x89ABCDEF;
float16x8_t vacc2x01234567 = vacc0x01234567;
float16x8_t vacc2x89ABCDEF = vacc0x89ABCDEF;
float16x8_t vacc3x01234567 = vacc0x01234567;
float16x8_t vacc3x89ABCDEF = vacc0x89ABCDEF;
float16x8_t vacc4x01234567 = vacc0x01234567;
float16x8_t vacc4x89ABCDEF = vacc0x89ABCDEF;
float16x8_t vacc5x01234567 = vacc0x01234567;
float16x8_t vacc5x89ABCDEF = vacc0x89ABCDEF;
size_t p = ks;
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
const uint16_t* restrict a1 = (const uint16_t*) a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint16_t*) ((uintptr_t) a1 + a_offset);
}
const uint16_t* restrict a2 = (const uint16_t*) a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint16_t*) ((uintptr_t) a2 + a_offset);
}
const uint16_t* restrict a3 = (const uint16_t*) a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint16_t*) ((uintptr_t) a3 + a_offset);
}
const uint16_t* restrict a4 = (const uint16_t*) a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const uint16_t*) ((uintptr_t) a4 + a_offset);
}
const uint16_t* restrict a5 = (const uint16_t*) a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const uint16_t*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
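      // Main loop: consume 4 elements of K per iteration, broadcasting each A lane
      // against two 8-wide rows of packed B.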
for (; k >= 4 * sizeof(uint16_t); k -= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x4_t va4 = vreinterpret_f16_u16(vld1_u16(a4)); a4 += 4;
const float16x4_t va5 = vreinterpret_f16_u16(vld1_u16(a5)); a5 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
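        // On AArch64 use the fused vfmaq_lane_f16 form; other targets use vmlaq_lane_f16.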
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc0, va4, 0);
vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc0, va5, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
vacc4x89ABCDEF = vmlaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc0, va4, 0);
vacc5x89ABCDEF = vmlaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc0, va5, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc1, va4, 1);
vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc1, va5, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
vacc4x89ABCDEF = vmlaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc1, va4, 1);
vacc5x89ABCDEF = vmlaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc1, va5, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc2, va4, 2);
vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc2, va5, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
vacc4x89ABCDEF = vmlaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc2, va4, 2);
vacc5x89ABCDEF = vmlaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc2, va5, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEFc3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc3, va4, 3);
vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc3, va5, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
vacc0x89ABCDEF = vmlaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
vacc1x89ABCDEF = vmlaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
vacc2x89ABCDEF = vmlaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
vacc3x89ABCDEF = vmlaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
vacc4x89ABCDEF = vmlaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc3, va4, 3);
vacc5x89ABCDEF = vmlaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc3, va5, 3);
#endif
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t va4 = vreinterpretq_f16_u16(vld1q_dup_u16(a4)); a4 += 1;
const float16x8_t va5 = vreinterpretq_f16_u16(vld1q_dup_u16(a5)); a5 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
const float16x8_t vb89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0, vb89ABCDEF);
vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1, vb89ABCDEF);
vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2, vb89ABCDEF);
vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3, vb89ABCDEF);
vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4, vb89ABCDEF);
vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5, vb89ABCDEF);
k -= sizeof(uint16_t);
} while (k != 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);
vacc1x89ABCDEF = vmaxq_f16(vacc1x89ABCDEF, vmin);
vacc2x89ABCDEF = vmaxq_f16(vacc2x89ABCDEF, vmin);
vacc3x89ABCDEF = vmaxq_f16(vacc3x89ABCDEF, vmin);
vacc4x89ABCDEF = vmaxq_f16(vacc4x89ABCDEF, vmin);
vacc5x89ABCDEF = vmaxq_f16(vacc5x89ABCDEF, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);
vacc1x89ABCDEF = vminq_f16(vacc1x89ABCDEF, vmax);
vacc2x89ABCDEF = vminq_f16(vacc2x89ABCDEF, vmax);
vacc3x89ABCDEF = vminq_f16(vacc3x89ABCDEF, vmax);
vacc4x89ABCDEF = vminq_f16(vacc4x89ABCDEF, vmax);
vacc5x89ABCDEF = vminq_f16(vacc5x89ABCDEF, vmax);
if XNN_LIKELY(nc >= 16) {
vst1q_u16(c5, vreinterpretq_u16_f16(vacc5x01234567));
vst1q_u16(c5 + 8, vreinterpretq_u16_f16(vacc5x89ABCDEF));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
vst1q_u16(c4, vreinterpretq_u16_f16(vacc4x01234567));
vst1q_u16(c4 + 8, vreinterpretq_u16_f16(vacc4x89ABCDEF));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
vst1q_u16(c3 + 8, vreinterpretq_u16_f16(vacc3x89ABCDEF));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
vst1q_u16(c2 + 8, vreinterpretq_u16_f16(vacc2x89ABCDEF));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
vst1q_u16(c1 + 8, vreinterpretq_u16_f16(vacc1x89ABCDEF));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
vst1q_u16(c0 + 8, vreinterpretq_u16_f16(vacc0x89ABCDEF));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
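      // nc < 16: store 8, 4, 2, then 1 remaining columns as needed, shifting the
      // surviving lanes down after each partial store.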
if (nc & 8) {
vst1q_u16(c5, vreinterpretq_u16_f16(vacc5x01234567)); c5 += 8;
vst1q_u16(c4, vreinterpretq_u16_f16(vacc4x01234567)); c4 += 8;
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567)); c3 += 8;
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567)); c2 += 8;
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567)); c1 += 8;
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567)); c0 += 8;
vacc5x01234567 = vacc5x89ABCDEF;
vacc4x01234567 = vacc4x89ABCDEF;
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
}
float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
if (nc & 4) {
vst1_u16(c5, vreinterpret_u16_f16(vacc5x0123)); c5 += 4;
vst1_u16(c4, vreinterpret_u16_f16(vacc4x0123)); c4 += 4;
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vacc5x0123 = vget_high_f16(vacc5x01234567);
vacc4x0123 = vget_high_f16(vacc4x01234567);
vacc3x0123 = vget_high_f16(vacc3x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc0x0123 = vget_high_f16(vacc0x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c5, vreinterpret_u16_f16(vacc5x0123), 0);
vst1_lane_u16(c4, vreinterpret_u16_f16(vacc4x0123), 0);
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 20,813 | 52.096939 | 134 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-igemm/gen/f16-igemm-6x8-minmax-neonfp16arith-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f16_igemm_minmax_ukernel_6x8__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
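  // Set up the 6 output row pointers; rows beyond mr are redirected to alias the
  // previous row so out-of-range stores land on valid memory.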
uint16_t* c0 = (uint16_t*) c;
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vacc0x01234567;
float16x8_t vacc2x01234567 = vacc0x01234567;
float16x8_t vacc3x01234567 = vacc0x01234567;
float16x8_t vacc4x01234567 = vacc0x01234567;
float16x8_t vacc5x01234567 = vacc0x01234567;
size_t p = ks;
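    // Walk the ks indirection entries: each pass reads 6 A row pointers and accumulates kc elements.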
do {
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
const uint16_t* restrict a1 = (const uint16_t*) a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint16_t*) ((uintptr_t) a1 + a_offset);
}
const uint16_t* restrict a2 = (const uint16_t*) a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint16_t*) ((uintptr_t) a2 + a_offset);
}
const uint16_t* restrict a3 = (const uint16_t*) a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint16_t*) ((uintptr_t) a3 + a_offset);
}
const uint16_t* restrict a4 = (const uint16_t*) a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const uint16_t*) ((uintptr_t) a4 + a_offset);
}
const uint16_t* restrict a5 = (const uint16_t*) a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const uint16_t*) ((uintptr_t) a5 + a_offset);
}
a += 6;
size_t k = kc;
for (; k >= 4 * sizeof(uint16_t); k -= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x4_t va4 = vreinterpret_f16_u16(vld1_u16(a4)); a4 += 4;
const float16x4_t va5 = vreinterpret_f16_u16(vld1_u16(a5)); a5 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
#endif
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t va4 = vreinterpretq_f16_u16(vld1q_dup_u16(a4)); a4 += 1;
const float16x8_t va5 = vreinterpretq_f16_u16(vld1q_dup_u16(a5)); a5 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
k -= sizeof(uint16_t);
} while (k != 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
if XNN_LIKELY(nc >= 8) {
vst1q_u16(c5, vreinterpretq_u16_f16(vacc5x01234567));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
vst1q_u16(c4, vreinterpretq_u16_f16(vacc4x01234567));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
if (nc & 4) {
vst1_u16(c5, vreinterpret_u16_f16(vacc5x0123)); c5 += 4;
vst1_u16(c4, vreinterpret_u16_f16(vacc4x0123)); c4 += 4;
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vacc5x0123 = vget_high_f16(vacc5x01234567);
vacc4x0123 = vget_high_f16(vacc4x01234567);
vacc3x0123 = vget_high_f16(vacc3x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc0x0123 = vget_high_f16(vacc0x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c5, vreinterpret_u16_f16(vacc5x0123), 0);
vst1_lane_u16(c4, vreinterpret_u16_f16(vacc4x0123), 0);
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,716 | 45.656463 | 134 |
c
|
XNNPACK
|
XNNPACK-master/src/f16-igemm/gen/f16-igemm-8x8-minmax-neonfp16arith-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/f16-igemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
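// f16 IGEMM microkernel variant with an 8-row x 8-column output tile, indirect A
// addressing, and a fused min/max clamp.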
void xnn_f16_igemm_minmax_ukernel_8x8__neonfp16arith_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const void** restrict a,
const void* restrict w,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const void* zero,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 8);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(ks != 0);
assert(ks % (8 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint16_t* c0 = (uint16_t*) c;
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
c5 = c4;
}
uint16_t* c6 = (uint16_t*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
c6 = c5;
}
uint16_t* c7 = (uint16_t*) ((uintptr_t) c6 + cm_stride);
if XNN_UNPREDICTABLE(mr != 8) {
c7 = c6;
}
do {
float16x8_t vacc0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
float16x8_t vacc1x01234567 = vacc0x01234567;
float16x8_t vacc2x01234567 = vacc0x01234567;
float16x8_t vacc3x01234567 = vacc0x01234567;
float16x8_t vacc4x01234567 = vacc0x01234567;
float16x8_t vacc5x01234567 = vacc0x01234567;
float16x8_t vacc6x01234567 = vacc0x01234567;
float16x8_t vacc7x01234567 = vacc0x01234567;
size_t p = ks;
do {
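      // Fetch the 8 A row pointers for this indirection entry; pointers equal to `zero`
      // are used as-is and are not shifted by a_offset.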
const uint16_t* restrict a0 = (const uint16_t*) a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
}
const uint16_t* restrict a1 = (const uint16_t*) a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint16_t*) ((uintptr_t) a1 + a_offset);
}
const uint16_t* restrict a2 = (const uint16_t*) a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint16_t*) ((uintptr_t) a2 + a_offset);
}
const uint16_t* restrict a3 = (const uint16_t*) a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint16_t*) ((uintptr_t) a3 + a_offset);
}
const uint16_t* restrict a4 = (const uint16_t*) a[4];
assert(a4 != NULL);
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const uint16_t*) ((uintptr_t) a4 + a_offset);
}
const uint16_t* restrict a5 = (const uint16_t*) a[5];
assert(a5 != NULL);
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const uint16_t*) ((uintptr_t) a5 + a_offset);
}
const uint16_t* restrict a6 = (const uint16_t*) a[6];
assert(a6 != NULL);
if XNN_UNPREDICTABLE(a6 != zero) {
a6 = (const uint16_t*) ((uintptr_t) a6 + a_offset);
}
const uint16_t* restrict a7 = (const uint16_t*) a[7];
assert(a7 != NULL);
if XNN_UNPREDICTABLE(a7 != zero) {
a7 = (const uint16_t*) ((uintptr_t) a7 + a_offset);
}
a += 8;
size_t k = kc;
for (; k >= 4 * sizeof(uint16_t); k -= 4 * sizeof(uint16_t)) {
const float16x4_t va0 = vreinterpret_f16_u16(vld1_u16(a0)); a0 += 4;
const float16x4_t va1 = vreinterpret_f16_u16(vld1_u16(a1)); a1 += 4;
const float16x4_t va2 = vreinterpret_f16_u16(vld1_u16(a2)); a2 += 4;
const float16x4_t va3 = vreinterpret_f16_u16(vld1_u16(a3)); a3 += 4;
const float16x4_t va4 = vreinterpret_f16_u16(vld1_u16(a4)); a4 += 4;
const float16x4_t va5 = vreinterpret_f16_u16(vld1_u16(a5)); a5 += 4;
const float16x4_t va6 = vreinterpret_f16_u16(vld1_u16(a6)); a6 += 4;
const float16x4_t va7 = vreinterpret_f16_u16(vld1_u16(a7)); a7 += 4;
const float16x8_t vb01234567c0 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c0, va6, 0);
vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567c0, va6, 0);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
#endif
const float16x8_t vb01234567c1 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c1, va6, 1);
vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567c1, va6, 1);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
#endif
const float16x8_t vb01234567c2 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c2, va6, 2);
vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567c2, va6, 2);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
#endif
const float16x8_t vb01234567c3 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
#if XNN_ARCH_ARM64
vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c3, va6, 3);
vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c3, va7, 3);
#else
vacc0x01234567 = vmlaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
vacc1x01234567 = vmlaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
vacc2x01234567 = vmlaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
vacc3x01234567 = vmlaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
vacc4x01234567 = vmlaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
vacc5x01234567 = vmlaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
vacc6x01234567 = vmlaq_lane_f16(vacc6x01234567, vb01234567c3, va6, 3);
vacc7x01234567 = vmlaq_lane_f16(vacc7x01234567, vb01234567c3, va7, 3);
#endif
}
if XNN_UNLIKELY(k != 0) {
do {
const float16x8_t va0 = vreinterpretq_f16_u16(vld1q_dup_u16(a0)); a0 += 1;
const float16x8_t va1 = vreinterpretq_f16_u16(vld1q_dup_u16(a1)); a1 += 1;
const float16x8_t va2 = vreinterpretq_f16_u16(vld1q_dup_u16(a2)); a2 += 1;
const float16x8_t va3 = vreinterpretq_f16_u16(vld1q_dup_u16(a3)); a3 += 1;
const float16x8_t va4 = vreinterpretq_f16_u16(vld1q_dup_u16(a4)); a4 += 1;
const float16x8_t va5 = vreinterpretq_f16_u16(vld1q_dup_u16(a5)); a5 += 1;
const float16x8_t va6 = vreinterpretq_f16_u16(vld1q_dup_u16(a6)); a6 += 1;
const float16x8_t va7 = vreinterpretq_f16_u16(vld1q_dup_u16(a7)); a7 += 1;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6, vb01234567);
vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7, vb01234567);
k -= sizeof(uint16_t);
} while (k != 0);
}
p -= 8 * sizeof(void*);
} while (p != 0);
    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
vacc6x01234567 = vmaxq_f16(vacc6x01234567, vmin);
vacc7x01234567 = vmaxq_f16(vacc7x01234567, vmin);
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
vacc6x01234567 = vminq_f16(vacc6x01234567, vmax);
vacc7x01234567 = vminq_f16(vacc7x01234567, vmax);
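    // Write the finished 8x8 tile; when fewer than 8 columns remain, fall through to the
    // 4/2/1-column tail stores.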
if XNN_LIKELY(nc >= 8) {
vst1q_u16(c7, vreinterpretq_u16_f16(vacc7x01234567));
c7 = (uint16_t*) ((uintptr_t) c7 + cn_stride);
vst1q_u16(c6, vreinterpretq_u16_f16(vacc6x01234567));
c6 = (uint16_t*) ((uintptr_t) c6 + cn_stride);
vst1q_u16(c5, vreinterpretq_u16_f16(vacc5x01234567));
c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
vst1q_u16(c4, vreinterpretq_u16_f16(vacc4x01234567));
c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
vst1q_u16(c3, vreinterpretq_u16_f16(vacc3x01234567));
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
vst1q_u16(c2, vreinterpretq_u16_f16(vacc2x01234567));
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1q_u16(c1, vreinterpretq_u16_f16(vacc1x01234567));
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1q_u16(c0, vreinterpretq_u16_f16(vacc0x01234567));
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a = (const void**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
float16x4_t vacc7x0123 = vget_low_f16(vacc7x01234567);
float16x4_t vacc6x0123 = vget_low_f16(vacc6x01234567);
float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
if (nc & 4) {
vst1_u16(c7, vreinterpret_u16_f16(vacc7x0123)); c7 += 4;
vst1_u16(c6, vreinterpret_u16_f16(vacc6x0123)); c6 += 4;
vst1_u16(c5, vreinterpret_u16_f16(vacc5x0123)); c5 += 4;
vst1_u16(c4, vreinterpret_u16_f16(vacc4x0123)); c4 += 4;
vst1_u16(c3, vreinterpret_u16_f16(vacc3x0123)); c3 += 4;
vst1_u16(c2, vreinterpret_u16_f16(vacc2x0123)); c2 += 4;
vst1_u16(c1, vreinterpret_u16_f16(vacc1x0123)); c1 += 4;
vst1_u16(c0, vreinterpret_u16_f16(vacc0x0123)); c0 += 4;
vacc7x0123 = vget_high_f16(vacc7x01234567);
vacc6x0123 = vget_high_f16(vacc6x01234567);
vacc5x0123 = vget_high_f16(vacc5x01234567);
vacc4x0123 = vget_high_f16(vacc4x01234567);
vacc3x0123 = vget_high_f16(vacc3x01234567);
vacc2x0123 = vget_high_f16(vacc2x01234567);
vacc1x0123 = vget_high_f16(vacc1x01234567);
vacc0x0123 = vget_high_f16(vacc0x01234567);
}
if (nc & 2) {
vst1_lane_u32((void*) c7, vreinterpret_u32_f16(vacc7x0123), 0); c7 += 2;
vst1_lane_u32((void*) c6, vreinterpret_u32_f16(vacc6x0123), 0); c6 += 2;
vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
vacc7x0123 = vext_f16(vacc7x0123, vacc7x0123, 2);
vacc6x0123 = vext_f16(vacc6x0123, vacc6x0123, 2);
vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c7, vreinterpret_u16_f16(vacc7x0123), 0);
vst1_lane_u16(c6, vreinterpret_u16_f16(vacc6x0123), 0);
vst1_lane_u16(c5, vreinterpret_u16_f16(vacc5x0123), 0);
vst1_lane_u16(c4, vreinterpret_u16_f16(vacc4x0123), 0);
vst1_lane_u16(c3, vreinterpret_u16_f16(vacc3x0123), 0);
vst1_lane_u16(c2, vreinterpret_u16_f16(vacc2x0123), 0);
vst1_lane_u16(c1, vreinterpret_u16_f16(vacc1x0123), 0);
vst1_lane_u16(c0, vreinterpret_u16_f16(vacc0x0123), 0);
}
nc = 0;
}
} while (nc != 0);
}
| 17,389 | 47.848315 | 134 |
c
|