repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x8-minmax-neonfma-dup-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
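// 4x8 GEMM microkernel for f32 activations with channelwise-quantized (qc8w)
// int8 weights: accumulates a 4-row x 8-column tile of C, then applies
// per-output-channel scales and a min/max clamp. As consumed below, each
// 8-column block of packed weights holds 8 f32 biases, kc x 8 int8 weights,
// and 8 f32 scales.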
void xnn_f32_qc8w_gemm_minmax_ukernel_4x8__neonfma_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
size_t k = kc;
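// Main loop: consume 2 k elements per iteration, widening 2 x 8 int8 weights
// to f32 and issuing FMAs against lane-broadcast activations.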
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
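// k remainder (one float left per row): broadcast the last activation; each
// duplicated 32-bit load brings in 4 int8 weights, which are widened to f32
// (only the low half of each widened vector is used).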
const int8x8_t vw01230123 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int8x8_t vw45674567 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int16x8_t vxw01230123 = vmovl_s8(vw01230123);
const int16x8_t vxw45674567 = vmovl_s8(vw45674567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01230123));
const int32x4_t vxw4567 = vmovl_s16(vget_low_s16(vxw45674567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
}
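// Epilogue: apply per-output-channel scales, clamp to [min, max], then store
// the 4x8 tile, with 4/2/1-column tail handling when fewer than 8 columns remain.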
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,950 | 38.258772 | 98 | c |
XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x8-minmax-sse2-dup.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
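// 4x8 GEMM microkernel (SSE2, "dup" variant) for f32 activations with
// channelwise-quantized int8 weights: broadcasts each activation lane with
// integer shuffles and sign-extends the int8 weights to f32 on the fly; the
// epilogue applies per-channel scales and a min/max clamp.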
void xnn_f32_qc8w_gemm_minmax_ukernel_4x8__sse2_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
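// Sign-extend 8 int8 weights to int32 without SSE4.1: duplicate each byte into
// the high byte of a 32-bit lane via two unpacks, arithmetic-shift right by 24,
// then convert to f32.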
const __m128i vb01234567c0 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567c0 = _mm_unpacklo_epi8(vb01234567c0, vb01234567c0);
const __m128 vb0123c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c0, vbw01234567c0), 24));
const __m128 vb4567c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c0, vbw01234567c0), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128i vb01234567c1 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 8));
const __m128i vbw01234567c1 = _mm_unpacklo_epi8(vb01234567c1, vb01234567c1);
const __m128 vb0123c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c1, vbw01234567c1), 24));
const __m128 vb4567c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c1, vbw01234567c1), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128i vb01234567c2 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 16));
const __m128i vbw01234567c2 = _mm_unpacklo_epi8(vb01234567c2, vb01234567c2);
const __m128 vb0123c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c2, vbw01234567c2), 24));
const __m128 vb4567c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c2, vbw01234567c2), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128i vb01234567c3 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 24));
const __m128i vbw01234567c3 = _mm_unpacklo_epi8(vb01234567c3, vb01234567c3);
const __m128 vb0123c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c3, vbw01234567c3), 24));
const __m128 vb4567c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c3, vbw01234567c3), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128i vb01234567 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567 = _mm_unpacklo_epi8(vb01234567, vb01234567);
const __m128 vb0123 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567, vbw01234567), 24));
const __m128 vb4567 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567, vbw01234567), 24));
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
vacc3x0123 = _mm_mul_ps(vacc3x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
vacc3x4567 = _mm_mul_ps(vacc3x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 12,650 | 43.080139 | 116 | c |
XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x8-minmax-sse2-load1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
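// 4x8 GEMM microkernel (SSE2, "load1" variant): one k element per iteration,
// broadcasting a single f32 activation per row and sign-extending 8 int8
// weights with the unpack-and-shift sequence before the multiply-adds.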
void xnn_f32_qc8w_gemm_minmax_ukernel_4x8__sse2_load1(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128i vb01234567 = _mm_loadl_epi64((const __m128i *) w);
const __m128i vbw01234567 = _mm_unpacklo_epi8(vb01234567, vb01234567);
const __m128 vb0123 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567, vbw01234567), 24));
const __m128 vb4567 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567, vbw01234567), 24));
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
vacc3x0123 = _mm_mul_ps(vacc3x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
vacc3x4567 = _mm_mul_ps(vacc3x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 6,441 | 32.035897 | 110 | c |
XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x8-minmax-sse41-dup.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
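// 4x8 GEMM microkernel (SSE4.1, "dup" variant): same structure as the SSE2
// version, but sign-extends int8 weights with _mm_cvtepi8_epi32 from 4-byte
// unaligned loads instead of the unpack-and-shift sequence.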
void xnn_f32_qc8w_gemm_minmax_ukernel_4x8__sse41_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128i vbi0123c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 0)));
const __m128i vbi4567c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123c0 = _mm_cvtepi32_ps(vbi0123c0);
const __m128 vb4567c0 = _mm_cvtepi32_ps(vbi4567c0);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128i vbi0123c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 8)));
const __m128i vbi4567c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 12)));
const __m128 vb0123c1 = _mm_cvtepi32_ps(vbi0123c1);
const __m128 vb4567c1 = _mm_cvtepi32_ps(vbi4567c1);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128i vbi0123c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 16)));
const __m128i vbi4567c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 20)));
const __m128 vb0123c2 = _mm_cvtepi32_ps(vbi0123c2);
const __m128 vb4567c2 = _mm_cvtepi32_ps(vbi4567c2);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128i vbi0123c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 24)));
const __m128i vbi4567c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 28)));
const __m128 vb0123c3 = _mm_cvtepi32_ps(vbi0123c3);
const __m128 vb4567c3 = _mm_cvtepi32_ps(vbi4567c3);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128i vbi0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const void*) w)));
const __m128i vbi4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123 = _mm_cvtepi32_ps(vbi0123);
const __m128 vb4567 = _mm_cvtepi32_ps(vbi4567);
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
vacc3x0123 = _mm_mul_ps(vacc3x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
vacc3x4567 = _mm_mul_ps(vacc3x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 12,397 | 42.048611 | 119 | c |
XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x8-minmax-sse41-load1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
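// 4x8 GEMM microkernel (SSE4.1, "load1" variant): one k element per iteration,
// int8 weights sign-extended with _mm_cvtepi8_epi32; the epilogue applies
// per-channel scales and a min/max clamp before the (possibly partial) store.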
void xnn_f32_qc8w_gemm_minmax_ukernel_4x8__sse41_load1(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128i vbi0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_u32(w)));
const __m128i vbi4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_u32((const int8_t*) w + 4)));
const __m128 vb0123 = _mm_cvtepi32_ps(vbi0123);
const __m128 vb4567 = _mm_cvtepi32_ps(vbi4567);
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
k -= sizeof(float);
} while (k != 0);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
vacc3x0123 = _mm_mul_ps(vacc3x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
vacc3x4567 = _mm_mul_ps(vacc3x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 6,425 | 31.785714 | 116 | c |
XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x8s4-minmax-sse2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
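// 4x8s4 GEMM microkernel (SSE2, "shuffle" variant): loads 4 f32 activations per
// row and rotates them with _mm_shuffle_ps between the four packed weight
// sub-blocks, so all four k elements are consumed without per-lane broadcasts.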
void xnn_f32_qc8w_gemm_minmax_ukernel_4x8s4__sse2(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
__m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
__m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
__m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128i vb01234567c0 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567c0 = _mm_unpacklo_epi8(vb01234567c0, vb01234567c0);
const __m128 vb0123c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c0, vbw01234567c0), 24));
const __m128 vb4567c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c0, vbw01234567c0), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c1 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 8));
const __m128i vbw01234567c1 = _mm_unpacklo_epi8(vb01234567c1, vb01234567c1);
const __m128 vb0123c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c1, vbw01234567c1), 24));
const __m128 vb4567c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c1, vbw01234567c1), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c2 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 16));
const __m128i vbw01234567c2 = _mm_unpacklo_epi8(vb01234567c2, vb01234567c2);
const __m128 vb0123c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c2, vbw01234567c2), 24));
const __m128 vb4567c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c2, vbw01234567c2), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c3 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 24));
const __m128i vbw01234567c3 = _mm_unpacklo_epi8(vb01234567c3, vb01234567c3);
const __m128 vb0123c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c3, vbw01234567c3), 24));
const __m128 vb4567c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c3, vbw01234567c3), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
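// Fewer than 4 k elements remain, but full 4-element activation vectors are
// still loaded (the kernel is declared XNN_OOB_READS). This relies on the
// packed weights being zero-padded for the missing k positions: the
// cmpeq/andnot masks below zero the matching activation lanes so out-of-range
// values cannot contribute.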
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
__m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
__m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
__m128 va3 = _mm_loadu_ps(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const __m128i vb01234567c0 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567c0 = _mm_unpacklo_epi8(vb01234567c0, vb01234567c0);
const __m128 vb0123c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c0, vbw01234567c0), 24));
const __m128 vb4567c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c0, vbw01234567c0), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va3), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va3), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c1 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 8));
const __m128i vbw01234567c1 = _mm_unpacklo_epi8(vb01234567c1, vb01234567c1);
const __m128 vb0123c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c1, vbw01234567c1), 24));
const __m128 vb4567c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c1, vbw01234567c1), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va3), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va3), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c2 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 16));
const __m128i vbw01234567c2 = _mm_unpacklo_epi8(vb01234567c2, vb01234567c2);
const __m128 vb0123c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c2, vbw01234567c2), 24));
const __m128 vb4567c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c2, vbw01234567c2), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va3), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va3), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c3 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 24));
const __m128i vbw01234567c3 = _mm_unpacklo_epi8(vb01234567c3, vb01234567c3);
const __m128 vb0123c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c3, vbw01234567c3), 24));
const __m128 vb4567c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c3, vbw01234567c3), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va3), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va3), vb4567c3));
w = (const int8_t*) w + 32;
}
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
vacc3x0123 = _mm_mul_ps(vacc3x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
vacc3x4567 = _mm_mul_ps(vacc3x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 17,075 | 50.589124 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-4x8s4-minmax-sse41.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_4x8s4__sse41(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
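    // Main loop: 4 K elements per iteration. Between the four weight groups the A registers are
    // rotated by one lane (the "s4" shuffle), matching the interleaved order of the packed int8 weights.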
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
__m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
__m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
__m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128i vbi0123c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 0)));
const __m128i vbi4567c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123c0 = _mm_cvtepi32_ps(vbi0123c0);
const __m128 vb4567c0 = _mm_cvtepi32_ps(vbi4567c0);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 8)));
const __m128i vbi4567c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 12)));
const __m128 vb0123c1 = _mm_cvtepi32_ps(vbi0123c1);
const __m128 vb4567c1 = _mm_cvtepi32_ps(vbi4567c1);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 16)));
const __m128i vbi4567c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 20)));
const __m128 vb0123c2 = _mm_cvtepi32_ps(vbi0123c2);
const __m128 vb4567c2 = _mm_cvtepi32_ps(vbi4567c2);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 24)));
const __m128i vbi4567c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 28)));
const __m128 vb0123c3 = _mm_cvtepi32_ps(vbi0123c3);
const __m128 vb4567c3 = _mm_cvtepi32_ps(vbi4567c3);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
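      // Remainder of 1-3 K elements: full 4-element A vectors are still loaded, so A lanes whose
      // packed weight is zero padding are masked off (andnot of the compare mask) before the
      // multiply-accumulate, keeping values read past the end of the row out of the accumulators.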
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
__m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
__m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
__m128 va3 = _mm_loadu_ps(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
const __m128i vbi0123c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 0)));
const __m128i vbi4567c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123c0 = _mm_cvtepi32_ps(vbi0123c0);
const __m128 vb4567c0 = _mm_cvtepi32_ps(vbi4567c0);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va3), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va3), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 8)));
const __m128i vbi4567c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 12)));
const __m128 vb0123c1 = _mm_cvtepi32_ps(vbi0123c1);
const __m128 vb4567c1 = _mm_cvtepi32_ps(vbi4567c1);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va3), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va3), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 16)));
const __m128i vbi4567c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 20)));
const __m128 vb0123c2 = _mm_cvtepi32_ps(vbi0123c2);
const __m128 vb4567c2 = _mm_cvtepi32_ps(vbi4567c2);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va3), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va3), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 24)));
const __m128i vbi4567c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 28)));
const __m128 vb0123c3 = _mm_cvtepi32_ps(vbi0123c3);
const __m128 vb4567c3 = _mm_cvtepi32_ps(vbi4567c3);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va3), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va3), vb4567c3));
w = (const int8_t*) w + 32;
}
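    // Apply the per-channel scales stored after the int8 weights, then clamp to the [min, max] range.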
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
vacc3x0123 = _mm_mul_ps(vacc3x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
vacc3x4567 = _mm_mul_ps(vacc3x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 16,649 | 49.150602 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-5x16-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_5x16__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_loadu_ps((const float*) w + 0);
__m256 vacc0x89ABCDEF = _mm256_loadu_ps((const float*) w + 8);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
w = (const float*) w + 16;
size_t k = kc;
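    // Inner loop over K: broadcast one A element per row, sign-extend 16 int8 weights to int32,
    // convert to float, and accumulate with fused multiply-add.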
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256i vbi01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const void*) w));
const __m256i vbi89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const void*) ((const int8_t*) w + 8)));
const __m256 vb01234567 = _mm256_cvtepi32_ps(vbi01234567);
const __m256 vb89ABCDEF = _mm256_cvtepi32_ps(vbi89ABCDEF);
w = (const int8_t*) w + 16;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEF, vacc4x89ABCDEF);
k -= sizeof(float);
} while (k != 0);
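    // The per-output-channel scales follow the int8 weights in the packed buffer.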
const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w + 0);
vacc0x01234567 = _mm256_mul_ps(vacc0x01234567, vscale01234567);
vacc1x01234567 = _mm256_mul_ps(vacc1x01234567, vscale01234567);
vacc2x01234567 = _mm256_mul_ps(vacc2x01234567, vscale01234567);
vacc3x01234567 = _mm256_mul_ps(vacc3x01234567, vscale01234567);
vacc4x01234567 = _mm256_mul_ps(vacc4x01234567, vscale01234567);
const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) w + 8);
vacc0x89ABCDEF = _mm256_mul_ps(vacc0x89ABCDEF, vscale89ABCDEF);
vacc1x89ABCDEF = _mm256_mul_ps(vacc1x89ABCDEF, vscale89ABCDEF);
vacc2x89ABCDEF = _mm256_mul_ps(vacc2x89ABCDEF, vscale89ABCDEF);
vacc3x89ABCDEF = _mm256_mul_ps(vacc3x89ABCDEF, vscale89ABCDEF);
vacc4x89ABCDEF = _mm256_mul_ps(vacc4x89ABCDEF, vscale89ABCDEF);
w = (const float*) w + 16;
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc0x89ABCDEF = _mm256_max_ps(vmin, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_max_ps(vmin, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_max_ps(vmin, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_max_ps(vmin, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_max_ps(vmin, vacc4x89ABCDEF);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc0x89ABCDEF = _mm256_min_ps(vmax, vacc0x89ABCDEF);
vacc1x89ABCDEF = _mm256_min_ps(vmax, vacc1x89ABCDEF);
vacc2x89ABCDEF = _mm256_min_ps(vmax, vacc2x89ABCDEF);
vacc3x89ABCDEF = _mm256_min_ps(vmax, vacc3x89ABCDEF);
vacc4x89ABCDEF = _mm256_min_ps(vmax, vacc4x89ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
_mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
_mm256_storeu_ps(c3, vacc3x01234567);
_mm256_storeu_ps(c2, vacc2x01234567);
_mm256_storeu_ps(c1, vacc1x01234567);
_mm256_storeu_ps(c0, vacc0x01234567);
vacc4x01234567 = vacc4x89ABCDEF;
vacc3x01234567 = vacc3x89ABCDEF;
vacc2x01234567 = vacc2x89ABCDEF;
vacc1x01234567 = vacc1x89ABCDEF;
vacc0x01234567 = vacc0x89ABCDEF;
c4 += 8;
c3 += 8;
c2 += 8;
c1 += 8;
c0 += 8;
}
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 9,239 | 36.258065 | 111 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-5x16-minmax-avx512skx-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_5x16__avx512skx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m512 vacc0x0123456789ABCDEF = _mm512_loadu_ps(w);
__m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
w = (const float*) w + 16;
size_t k = kc;
do {
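      // Sign-extend 16 int8 weights to int32, convert to float, and accumulate one broadcast A element per row.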
const __m512i vbi0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_epi8(w));
const __m512 vb0123456789ABCDEF = _mm512_cvtepi32_ps(vbi0123456789ABCDEF);
w = (const int8_t*) w + 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
const __m512 va4 = _mm512_set1_ps(*a4);
vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
a4 += 1;
k -= sizeof(float);
} while (k != 0);
const __m512 vscale0123456789ABCDEF = _mm512_loadu_ps((const float*) w + 0);
vacc0x0123456789ABCDEF = _mm512_mul_ps(vacc0x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_mul_ps(vacc1x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_mul_ps(vacc2x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_mul_ps(vacc3x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_mul_ps(vacc4x0123456789ABCDEF, vscale0123456789ABCDEF);
w = (const float*) w + 16;
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 5,923 | 36.732484 | 106 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-5x8-minmax-aarch64-neonfma-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_5x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
size_t k = kc;
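    // Main loop: 2 K elements per iteration; widen 8 int8 weights to float and accumulate with lane-indexed FMAs.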
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
}
if XNN_UNLIKELY(k != 0) {
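      // Odd-K remainder: broadcast the last A element of each row and widen 4 int8 weights for each 4-column half.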
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const int8x8_t vw01230123 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int8x8_t vw45674567 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int16x8_t vxw01230123 = vmovl_s8(vw01230123);
const int16x8_t vxw45674567 = vmovl_s8(vw45674567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01230123));
const int32x4_t vxw4567 = vmovl_s16(vget_low_s16(vxw45674567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc4x0123 = vmulq_f32(vacc4x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
vacc4x4567 = vmulq_f32(vacc4x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,066 | 38.948413 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-5x8-minmax-avx2-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_5x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m256 vacc0x01234567 = _mm256_loadu_ps((const float*) w + 0);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
w = (const float*) w + 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256i vbi01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const void*) w));
const __m256 vb01234567 = _mm256_cvtepi32_ps(vbi01234567);
w = (const int8_t*) w + 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
k -= sizeof(float);
} while (k != 0);
const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w + 0);
vacc0x01234567 = _mm256_mul_ps(vacc0x01234567, vscale01234567);
vacc1x01234567 = _mm256_mul_ps(vacc1x01234567, vscale01234567);
vacc2x01234567 = _mm256_mul_ps(vacc2x01234567, vscale01234567);
vacc3x01234567 = _mm256_mul_ps(vacc3x01234567, vscale01234567);
vacc4x01234567 = _mm256_mul_ps(vacc4x01234567, vscale01234567);
w = (const float*) w + 8;
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
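      // Narrow the accumulators to their low 128-bit halves for the 4/2/1-column tail stores.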
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 6,665 | 33.010204 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-5x8-minmax-neon-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_5x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
size_t k = kc;
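    // Main loop: 2 K elements per iteration using vmlaq_lane_f32 (non-fused multiply-add, for targets without NEON FMA).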
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const int8x8_t vw01230123 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int8x8_t vw45674567 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int16x8_t vxw01230123 = vmovl_s8(vw01230123);
const int16x8_t vxw45674567 = vmovl_s8(vw45674567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01230123));
const int32x4_t vxw4567 = vmovl_s16(vget_low_s16(vxw45674567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc4x0123 = vmulq_f32(vacc4x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
vacc4x4567 = vmulq_f32(vacc4x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,055 | 38.904762 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-5x8-minmax-sse2-dup.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_5x8__sse2_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
__m128 vacc4x0123 = vacc0x0123;
__m128 vacc4x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
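    // Main loop: 4 K elements per iteration. Without SSE4.1's cvtepi8_epi32, the int8 weights are
    // sign-extended by duplicating bytes with unpacks and arithmetic-shifting each 32-bit lane right by 24.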
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va4 = _mm_loadu_ps(a4);
a4 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va4c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128i vb01234567c0 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567c0 = _mm_unpacklo_epi8(vb01234567c0, vb01234567c0);
const __m128 vb0123c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c0, vbw01234567c0), 24));
const __m128 vb4567c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c0, vbw01234567c0), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va4c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128i vb01234567c1 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 8));
const __m128i vbw01234567c1 = _mm_unpacklo_epi8(vb01234567c1, vb01234567c1);
const __m128 vb0123c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c1, vbw01234567c1), 24));
const __m128 vb4567c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c1, vbw01234567c1), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va4c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128i vb01234567c2 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 16));
const __m128i vbw01234567c2 = _mm_unpacklo_epi8(vb01234567c2, vb01234567c2);
const __m128 vb0123c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c2, vbw01234567c2), 24));
const __m128 vb4567c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c2, vbw01234567c2), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));
const __m128i vb01234567c3 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 24));
const __m128i vbw01234567c3 = _mm_unpacklo_epi8(vb01234567c3, vb01234567c3);
const __m128 vb0123c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c3, vbw01234567c3), 24));
const __m128 vb4567c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c3, vbw01234567c3), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
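    // Remainder loop: handle the last 1-3 k-elements one at a time.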
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 va4 = _mm_load1_ps(a4);
a4 += 1;
const __m128i vb01234567 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567 = _mm_unpacklo_epi8(vb01234567, vb01234567);
const __m128 vb0123 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567, vbw01234567), 24));
const __m128 vb4567 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567, vbw01234567), 24));
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
}
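    // Apply the per-output-channel scale factors stored after the packed int8 weights.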
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
vacc3x0123 = _mm_mul_ps(vacc3x0123, vscale0123);
vacc4x0123 = _mm_mul_ps(vacc4x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
vacc3x4567 = _mm_mul_ps(vacc3x4567, vscale4567);
vacc4x4567 = _mm_mul_ps(vacc4x4567, vscale4567);
w = (const float*) w + 8;
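    // Clamp the results to the [min, max] range from the microkernel parameters.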
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
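    // Full 8-column tile: store all five rows, step the output pointers by
    // cn_stride, and rewind the input pointers by kc for the next column block.
    // Otherwise fall through to the 4/2/1-column remainder stores.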
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 14,911 | 44.187879 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-5x8-minmax-sse2-load1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_5x8__sse2_load1(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
__m128 vacc4x0123 = vacc0x0123;
__m128 vacc4x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
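    // load1 variant: consume one k-element per iteration, broadcasting the scalar
    // input value across the vector before multiplying by 8 dequantized weights.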
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 va4 = _mm_load1_ps(a4);
a4 += 1;
const __m128i vb01234567 = _mm_loadl_epi64((const __m128i *) w);
const __m128i vbw01234567 = _mm_unpacklo_epi8(vb01234567, vb01234567);
const __m128 vb0123 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567, vbw01234567), 24));
const __m128 vb4567 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567, vbw01234567), 24));
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
vacc3x0123 = _mm_mul_ps(vacc3x0123, vscale0123);
vacc4x0123 = _mm_mul_ps(vacc4x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
vacc3x4567 = _mm_mul_ps(vacc3x4567, vscale4567);
vacc4x4567 = _mm_mul_ps(vacc4x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,610 | 32.977679 | 110 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-5x8-minmax-sse41-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-dup.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_5x8__sse41_dup(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
__m128 vacc4x0123 = vacc0x0123;
__m128 vacc4x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
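    // Weights are sign-extended with the SSE4.1 _mm_cvtepi8_epi32 path instead of
    // the SSE2 unpack-and-shift sequence; otherwise the loop mirrors the dup scheme.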
while (k >= 4 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
const __m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
const __m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
const __m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
const __m128 va4 = _mm_loadu_ps(a4);
a4 += 4;
const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
const __m128 va4c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(0, 0, 0, 0)));
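      // _mm_cvtepi8_epi32 sign-extends 4 int8 weights directly to int32 lanes.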
const __m128i vbi0123c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 0)));
const __m128i vbi4567c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123c0 = _mm_cvtepi32_ps(vbi0123c0);
const __m128 vb4567c0 = _mm_cvtepi32_ps(vbi4567c0);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));
const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128 va4c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(1, 1, 1, 1)));
const __m128i vbi0123c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 8)));
const __m128i vbi4567c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 12)));
const __m128 vb0123c1 = _mm_cvtepi32_ps(vbi0123c1);
const __m128 vb4567c1 = _mm_cvtepi32_ps(vbi4567c1);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));
const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128 va4c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(2, 2, 2, 2)));
const __m128i vbi0123c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 16)));
const __m128i vbi4567c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 20)));
const __m128 vb0123c2 = _mm_cvtepi32_ps(vbi0123c2);
const __m128 vb4567c2 = _mm_cvtepi32_ps(vbi4567c2);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));
const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));
const __m128i vbi0123c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 24)));
const __m128i vbi4567c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 28)));
const __m128 vb0123c3 = _mm_cvtepi32_ps(vbi0123c3);
const __m128 vb4567c3 = _mm_cvtepi32_ps(vbi4567c3);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 va4 = _mm_load1_ps(a4);
a4 += 1;
const __m128i vbi0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const void*) w)));
const __m128i vbi4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123 = _mm_cvtepi32_ps(vbi0123);
const __m128 vb4567 = _mm_cvtepi32_ps(vbi4567);
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
}
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
vacc3x0123 = _mm_mul_ps(vacc3x0123, vscale0123);
vacc4x0123 = _mm_mul_ps(vacc4x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
vacc3x4567 = _mm_mul_ps(vacc3x4567, vscale4567);
vacc4x4567 = _mm_mul_ps(vacc4x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 14,658 | 43.287009 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-5x8-minmax-sse41-load1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-load1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_5x8__sse41_load1(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
__m128 vacc4x0123 = vacc0x0123;
__m128 vacc4x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
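    // One k-element per iteration: broadcast the scalar input, sign-extend 8 int8
    // weights with SSE4.1, and accumulate into the 5x8 tile.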
do {
const __m128 va0 = _mm_load1_ps(a0);
a0 += 1;
const __m128 va1 = _mm_load1_ps(a1);
a1 += 1;
const __m128 va2 = _mm_load1_ps(a2);
a2 += 1;
const __m128 va3 = _mm_load1_ps(a3);
a3 += 1;
const __m128 va4 = _mm_load1_ps(a4);
a4 += 1;
const __m128i vbi0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_u32(w)));
const __m128i vbi4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_u32((const int8_t*) w + 4)));
const __m128 vb0123 = _mm_cvtepi32_ps(vbi0123);
const __m128 vb4567 = _mm_cvtepi32_ps(vbi4567);
w = (const int8_t*) w + 8;
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
k -= sizeof(float);
} while (k != 0);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
vacc3x0123 = _mm_mul_ps(vacc3x0123, vscale0123);
vacc4x0123 = _mm_mul_ps(vacc4x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
vacc3x4567 = _mm_mul_ps(vacc3x4567, vscale4567);
vacc4x4567 = _mm_mul_ps(vacc4x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 7,594 | 32.755556 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-5x8s4-minmax-sse2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_5x8s4__sse2(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
__m128 vacc4x0123 = vacc0x0123;
__m128 vacc4x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
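    // s4 (shuffle) variant: load 4 k-elements per row, then rotate each input
    // vector by one lane after every weight group so the element-wise multiplies
    // line up with the next k-element of the packed weights.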
while (k >= 4 * sizeof(float)) {
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
__m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
__m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
__m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
__m128 va4 = _mm_loadu_ps(a4);
a4 += 4;
const __m128i vb01234567c0 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567c0 = _mm_unpacklo_epi8(vb01234567c0, vb01234567c0);
const __m128 vb0123c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c0, vbw01234567c0), 24));
const __m128 vb4567c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c0, vbw01234567c0), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c1 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 8));
const __m128i vbw01234567c1 = _mm_unpacklo_epi8(vb01234567c1, vb01234567c1);
const __m128 vb0123c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c1, vbw01234567c1), 24));
const __m128 vb4567c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c1, vbw01234567c1), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c2 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 16));
const __m128i vbw01234567c2 = _mm_unpacklo_epi8(vb01234567c2, vb01234567c2);
const __m128 vb0123c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c2, vbw01234567c2), 24));
const __m128 vb4567c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c2, vbw01234567c2), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c3 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 24));
const __m128i vbw01234567c3 = _mm_unpacklo_epi8(vb01234567c3, vb01234567c3);
const __m128 vb0123c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c3, vbw01234567c3), 24));
const __m128 vb4567c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c3, vbw01234567c3), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
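    // Remainder: rows are loaded full-width (the kernel is marked XNN_OOB_READS);
    // lanes whose packed weight is zero are masked off so values read past the
    // valid k-elements do not contribute.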
if XNN_UNLIKELY(k != 0) {
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
__m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
__m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
__m128 va3 = _mm_loadu_ps(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
__m128 va4 = _mm_loadu_ps(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const __m128i vb01234567c0 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 0));
const __m128i vbw01234567c0 = _mm_unpacklo_epi8(vb01234567c0, vb01234567c0);
const __m128 vb0123c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c0, vbw01234567c0), 24));
const __m128 vb4567c0 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c0, vbw01234567c0), 24));
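      // _mm_andnot_ps(_mm_cmpeq_ps(0, vb), va) zeroes input lanes wherever the
      // weight is zero before the multiply-accumulate.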
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va3), vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va4), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va3), vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va4), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c1 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 8));
const __m128i vbw01234567c1 = _mm_unpacklo_epi8(vb01234567c1, vb01234567c1);
const __m128 vb0123c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c1, vbw01234567c1), 24));
const __m128 vb4567c1 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c1, vbw01234567c1), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va3), vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va4), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va3), vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va4), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c2 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 16));
const __m128i vbw01234567c2 = _mm_unpacklo_epi8(vb01234567c2, vb01234567c2);
const __m128 vb0123c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c2, vbw01234567c2), 24));
const __m128 vb4567c2 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c2, vbw01234567c2), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va3), vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va4), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va3), vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va4), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb01234567c3 = _mm_loadl_epi64((const __m128i *) ((const int8_t*) w + 24));
const __m128i vbw01234567c3 = _mm_unpacklo_epi8(vb01234567c3, vb01234567c3);
const __m128 vb0123c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(vbw01234567c3, vbw01234567c3), 24));
const __m128 vb4567c3 = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(vbw01234567c3, vbw01234567c3), 24));
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va3), vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va4), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va3), vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va4), vb4567c3));
w = (const int8_t*) w + 32;
}
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
vacc3x0123 = _mm_mul_ps(vacc3x0123, vscale0123);
vacc4x0123 = _mm_mul_ps(vacc4x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
vacc3x4567 = _mm_mul_ps(vacc3x4567, vscale4567);
vacc4x4567 = _mm_mul_ps(vacc4x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 20,141 | 51.727749 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-5x8s4-minmax-sse41.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/sse-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_5x8s4__sse41(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
do {
__m128 vacc0x0123 = _mm_loadu_ps((const float*) w + 0);
__m128 vacc0x4567 = _mm_loadu_ps((const float*) w + 4);
__m128 vacc1x0123 = vacc0x0123;
__m128 vacc1x4567 = vacc0x4567;
__m128 vacc2x0123 = vacc0x0123;
__m128 vacc2x4567 = vacc0x4567;
__m128 vacc3x0123 = vacc0x0123;
__m128 vacc3x4567 = vacc0x4567;
__m128 vacc4x0123 = vacc0x0123;
__m128 vacc4x4567 = vacc0x4567;
w = (const float*) w + 8;
size_t k = kc;
while (k >= 4 * sizeof(float)) {
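      // Main loop over 4 k-steps: int8 weights are sign-extended and converted to float on the fly, and the activation vectors are rotated one lane between steps (the s4 shuffle scheme).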
__m128 va0 = _mm_loadu_ps(a0);
a0 += 4;
__m128 va1 = _mm_loadu_ps(a1);
a1 += 4;
__m128 va2 = _mm_loadu_ps(a2);
a2 += 4;
__m128 va3 = _mm_loadu_ps(a3);
a3 += 4;
__m128 va4 = _mm_loadu_ps(a4);
a4 += 4;
const __m128i vbi0123c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 0)));
const __m128i vbi4567c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123c0 = _mm_cvtepi32_ps(vbi0123c0);
const __m128 vb4567c0 = _mm_cvtepi32_ps(vbi4567c0);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 8)));
const __m128i vbi4567c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 12)));
const __m128 vb0123c1 = _mm_cvtepi32_ps(vbi0123c1);
const __m128 vb4567c1 = _mm_cvtepi32_ps(vbi4567c1);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 16)));
const __m128i vbi4567c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 20)));
const __m128 vb0123c2 = _mm_cvtepi32_ps(vbi0123c2);
const __m128 vb4567c2 = _mm_cvtepi32_ps(vbi4567c2);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 24)));
const __m128i vbi4567c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 28)));
const __m128 vb0123c3 = _mm_cvtepi32_ps(vbi0123c3);
const __m128 vb4567c3 = _mm_cvtepi32_ps(vbi4567c3);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c3));
w = (const int8_t*) w + 32;
k -= 4 * sizeof(float);
}
if XNN_UNLIKELY(k != 0) {
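      // Remainder (k < 4): zero weights, including the zero padding past kc, mask off the matching activation lanes so out-of-range activation values never contribute.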
__m128 va0 = _mm_loadu_ps(a0);
a0 = (const float*) ((uintptr_t) a0 + k);
__m128 va1 = _mm_loadu_ps(a1);
a1 = (const float*) ((uintptr_t) a1 + k);
__m128 va2 = _mm_loadu_ps(a2);
a2 = (const float*) ((uintptr_t) a2 + k);
__m128 va3 = _mm_loadu_ps(a3);
a3 = (const float*) ((uintptr_t) a3 + k);
__m128 va4 = _mm_loadu_ps(a4);
a4 = (const float*) ((uintptr_t) a4 + k);
const __m128i vbi0123c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 0)));
const __m128i vbi4567c0 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 4)));
const __m128 vb0123c0 = _mm_cvtepi32_ps(vbi0123c0);
const __m128 vb4567c0 = _mm_cvtepi32_ps(vbi4567c0);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va3), vb0123c0));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va4), vb0123c0));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va3), vb4567c0));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va4), vb4567c0));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 8)));
const __m128i vbi4567c1 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 12)));
const __m128 vb0123c1 = _mm_cvtepi32_ps(vbi0123c1);
const __m128 vb4567c1 = _mm_cvtepi32_ps(vbi4567c1);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va3), vb0123c1));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va4), vb0123c1));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va3), vb4567c1));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va4), vb4567c1));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 16)));
const __m128i vbi4567c2 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 20)));
const __m128 vb0123c2 = _mm_cvtepi32_ps(vbi0123c2);
const __m128 vb4567c2 = _mm_cvtepi32_ps(vbi4567c2);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va3), vb0123c2));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va4), vb0123c2));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va3), vb4567c2));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va4), vb4567c2));
va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vbi0123c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 24)));
const __m128i vbi4567c3 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32((const int8_t*) w + 28)));
const __m128 vb0123c3 = _mm_cvtepi32_ps(vbi0123c3);
const __m128 vb4567c3 = _mm_cvtepi32_ps(vbi4567c3);
vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va3), vb0123c3));
vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va4), vb0123c3));
vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va3), vb4567c3));
vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va4), vb4567c3));
w = (const int8_t*) w + 32;
}
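    // Apply the per-channel dequantization scales that are packed after the int8 weights.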
const __m128 vscale0123 = _mm_loadu_ps((const float*) w + 0);
vacc0x0123 = _mm_mul_ps(vacc0x0123, vscale0123);
vacc1x0123 = _mm_mul_ps(vacc1x0123, vscale0123);
vacc2x0123 = _mm_mul_ps(vacc2x0123, vscale0123);
vacc3x0123 = _mm_mul_ps(vacc3x0123, vscale0123);
vacc4x0123 = _mm_mul_ps(vacc4x0123, vscale0123);
const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
vacc0x4567 = _mm_mul_ps(vacc0x4567, vscale4567);
vacc1x4567 = _mm_mul_ps(vacc1x4567, vscale4567);
vacc2x4567 = _mm_mul_ps(vacc2x4567, vscale4567);
vacc3x4567 = _mm_mul_ps(vacc3x4567, vscale4567);
vacc4x4567 = _mm_mul_ps(vacc4x4567, vscale4567);
w = (const float*) w + 8;
const __m128 vmax = _mm_load_ps(params->sse.max);
vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);
const __m128 vmin = _mm_load_ps(params->sse.min);
vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
if XNN_LIKELY(nc >= 8) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm_storeu_ps(c0, vacc0x0123);
_mm_storeu_ps(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 19,715 | 50.477807 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-6x16-minmax-avx512skx-broadcast.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_6x16__avx512skx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
__m512 vacc0x0123456789ABCDEF = _mm512_loadu_ps(w);
__m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF;
w = (const float*) w + 16;
size_t k = kc;
do {
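      // One k-step per iteration: dequantize 16 int8 weights and broadcast a single activation from each of the 6 rows.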
const __m512i vbi0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_epi8(w));
const __m512 vb0123456789ABCDEF = _mm512_cvtepi32_ps(vbi0123456789ABCDEF);
w = (const int8_t*) w + 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
const __m512 va4 = _mm512_set1_ps(*a4);
vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
const __m512 va5 = _mm512_set1_ps(*a5);
vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
a4 += 1;
a5 += 1;
k -= sizeof(float);
} while (k != 0);
const __m512 vscale0123456789ABCDEF = _mm512_loadu_ps((const float*) w + 0);
vacc0x0123456789ABCDEF = _mm512_mul_ps(vacc0x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_mul_ps(vacc1x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_mul_ps(vacc2x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_mul_ps(vacc3x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_mul_ps(vacc4x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_mul_ps(vacc5x0123456789ABCDEF, vscale0123456789ABCDEF);
w = (const float*) w + 16;
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_max_ps(vmin, vacc5x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_min_ps(vmax, vacc5x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
_mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
| 6,779 | 37.965517 | 106 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-6x2-minmax-aarch64-neonfma-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2-neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_6x2__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x2_t vacc0x01 = vreinterpret_f32_u8(vld1_u8(w)); w = (const float*) w + 2;
float32x2_t vacc1x01 = vacc0x01;
float32x2_t vacc2x01 = vacc0x01;
float32x2_t vacc3x01 = vacc0x01;
float32x2_t vacc4x01 = vacc0x01;
float32x2_t vacc5x01 = vacc0x01;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
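      // Load 4 int8 weights (2 output channels x 2 k-steps) and widen them to a pair of float32x2 columns.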
const uint32x2_t vtmpb = vld1_dup_u32(w); w = (const int8_t*) w + 4;
const int32x4_t vtmpi = vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_u32(vtmpb))));
const float32x4_t vb01c01 = vcvtq_f32_s32(vtmpi);
const float32x2_t vb01c0 = vget_low_f32(vb01c01);
const float32x2_t vb01c1 = vget_high_f32(vb01c01);
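      // AArch64 uses the lane form of FMA directly; other targets duplicate the activation lane before the FMA.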
#if XNN_ARCH_ARM64
vacc0x01 = vfma_lane_f32(vacc0x01, vb01c0, va0, 0);
vacc1x01 = vfma_lane_f32(vacc1x01, vb01c0, va1, 0);
vacc2x01 = vfma_lane_f32(vacc2x01, vb01c0, va2, 0);
vacc3x01 = vfma_lane_f32(vacc3x01, vb01c0, va3, 0);
vacc4x01 = vfma_lane_f32(vacc4x01, vb01c0, va4, 0);
vacc5x01 = vfma_lane_f32(vacc5x01, vb01c0, va5, 0);
#else
const float32x2_t va0c0 = vdup_lane_f32(va0, 0);
const float32x2_t va1c0 = vdup_lane_f32(va1, 0);
const float32x2_t va2c0 = vdup_lane_f32(va2, 0);
const float32x2_t va3c0 = vdup_lane_f32(va3, 0);
const float32x2_t va4c0 = vdup_lane_f32(va4, 0);
const float32x2_t va5c0 = vdup_lane_f32(va5, 0);
vacc0x01 = vfma_f32(vacc0x01, va0c0, vb01c0);
vacc1x01 = vfma_f32(vacc1x01, va1c0, vb01c0);
vacc2x01 = vfma_f32(vacc2x01, va2c0, vb01c0);
vacc3x01 = vfma_f32(vacc3x01, va3c0, vb01c0);
vacc4x01 = vfma_f32(vacc4x01, va4c0, vb01c0);
vacc5x01 = vfma_f32(vacc5x01, va5c0, vb01c0);
#endif
#if XNN_ARCH_ARM64
vacc0x01 = vfma_lane_f32(vacc0x01, vb01c1, va0, 1);
vacc1x01 = vfma_lane_f32(vacc1x01, vb01c1, va1, 1);
vacc2x01 = vfma_lane_f32(vacc2x01, vb01c1, va2, 1);
vacc3x01 = vfma_lane_f32(vacc3x01, vb01c1, va3, 1);
vacc4x01 = vfma_lane_f32(vacc4x01, vb01c1, va4, 1);
vacc5x01 = vfma_lane_f32(vacc5x01, vb01c1, va5, 1);
#else
const float32x2_t va0c1 = vdup_lane_f32(va0, 1);
const float32x2_t va1c1 = vdup_lane_f32(va1, 1);
const float32x2_t va2c1 = vdup_lane_f32(va2, 1);
const float32x2_t va3c1 = vdup_lane_f32(va3, 1);
const float32x2_t va4c1 = vdup_lane_f32(va4, 1);
const float32x2_t va5c1 = vdup_lane_f32(va5, 1);
vacc0x01 = vfma_f32(vacc0x01, va0c1, vb01c1);
vacc1x01 = vfma_f32(vacc1x01, va1c1, vb01c1);
vacc2x01 = vfma_f32(vacc2x01, va2c1, vb01c1);
vacc3x01 = vfma_f32(vacc3x01, va3c1, vb01c1);
vacc4x01 = vfma_f32(vacc4x01, va4c1, vb01c1);
vacc5x01 = vfma_f32(vacc5x01, va5c1, vb01c1);
#endif
}
if XNN_UNLIKELY(k != 0) {
const float32x2_t va0 = vld1_dup_f32(a0); a0 += 1;
const float32x2_t va1 = vld1_dup_f32(a1); a1 += 1;
const float32x2_t va2 = vld1_dup_f32(a2); a2 += 1;
const float32x2_t va3 = vld1_dup_f32(a3); a3 += 1;
const float32x2_t va4 = vld1_dup_f32(a4); a4 += 1;
const float32x2_t va5 = vld1_dup_f32(a5); a5 += 1;
const uint16x4_t vtmpb = vld1_dup_u16(w); w = (const int8_t*) w + 2;
const int32x2_t vtmpi = vget_low_s32(vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_u16(vtmpb)))));
const float32x2_t vb01 = vcvt_f32_s32(vtmpi);
vacc0x01 = vfma_f32(vacc0x01, va0, vb01);
vacc1x01 = vfma_f32(vacc1x01, va1, vb01);
vacc2x01 = vfma_f32(vacc2x01, va2, vb01);
vacc3x01 = vfma_f32(vacc3x01, va3, vb01);
vacc4x01 = vfma_f32(vacc4x01, va4, vb01);
vacc5x01 = vfma_f32(vacc5x01, va5, vb01);
}
const float32x2_t vscale = vreinterpret_f32_u8(vld1_u8(w)); w = (const float*) w + 2;
vacc0x01 = vmul_f32(vacc0x01, vscale);
vacc1x01 = vmul_f32(vacc1x01, vscale);
vacc2x01 = vmul_f32(vacc2x01, vscale);
vacc3x01 = vmul_f32(vacc3x01, vscale);
vacc4x01 = vmul_f32(vacc4x01, vscale);
vacc5x01 = vmul_f32(vacc5x01, vscale);
    const float32x2_t vmax = vld1_dup_f32(&params->scalar.max);
vacc0x01 = vmin_f32(vacc0x01, vmax);
vacc1x01 = vmin_f32(vacc1x01, vmax);
vacc2x01 = vmin_f32(vacc2x01, vmax);
vacc3x01 = vmin_f32(vacc3x01, vmax);
vacc4x01 = vmin_f32(vacc4x01, vmax);
vacc5x01 = vmin_f32(vacc5x01, vmax);
    const float32x2_t vmin = vld1_dup_f32(&params->scalar.min);
vacc0x01 = vmax_f32(vacc0x01, vmin);
vacc1x01 = vmax_f32(vacc1x01, vmin);
vacc2x01 = vmax_f32(vacc2x01, vmin);
vacc3x01 = vmax_f32(vacc3x01, vmin);
vacc4x01 = vmax_f32(vacc4x01, vmin);
vacc5x01 = vmax_f32(vacc5x01, vmin);
if XNN_LIKELY(nc >= 2) {
vst1_f32(c0, vacc0x01);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
vst1_f32(c1, vacc1x01);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1_f32(c2, vacc2x01);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1_f32(c3, vacc3x01);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1_f32(c4, vacc4x01);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1_f32(c5, vacc5x01);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a5 = (const float*) ((uintptr_t) a5 - kc);
nc -= 2;
} else {
assert(nc == 1);
vst1_lane_f32(c0, vacc0x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c5, vacc5x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 8,011 | 36.439252 | 106 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-6x2-minmax-neon-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/MRx2-neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_6x2__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x2_t vacc0x01 = vreinterpret_f32_u8(vld1_u8(w)); w = (const float*) w + 2;
float32x2_t vacc1x01 = vacc0x01;
float32x2_t vacc2x01 = vacc0x01;
float32x2_t vacc3x01 = vacc0x01;
float32x2_t vacc4x01 = vacc0x01;
float32x2_t vacc5x01 = vacc0x01;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const uint32x2_t vtmpb = vld1_dup_u32(w); w = (const int8_t*) w + 4;
const int32x4_t vtmpi = vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_u32(vtmpb))));
const float32x4_t vb01c01 = vcvtq_f32_s32(vtmpi);
const float32x2_t vb01c0 = vget_low_f32(vb01c01);
const float32x2_t vb01c1 = vget_high_f32(vb01c01);
vacc0x01 = vmla_lane_f32(vacc0x01, vb01c0, va0, 0);
vacc1x01 = vmla_lane_f32(vacc1x01, vb01c0, va1, 0);
vacc2x01 = vmla_lane_f32(vacc2x01, vb01c0, va2, 0);
vacc3x01 = vmla_lane_f32(vacc3x01, vb01c0, va3, 0);
vacc4x01 = vmla_lane_f32(vacc4x01, vb01c0, va4, 0);
vacc5x01 = vmla_lane_f32(vacc5x01, vb01c0, va5, 0);
vacc0x01 = vmla_lane_f32(vacc0x01, vb01c1, va0, 1);
vacc1x01 = vmla_lane_f32(vacc1x01, vb01c1, va1, 1);
vacc2x01 = vmla_lane_f32(vacc2x01, vb01c1, va2, 1);
vacc3x01 = vmla_lane_f32(vacc3x01, vb01c1, va3, 1);
vacc4x01 = vmla_lane_f32(vacc4x01, vb01c1, va4, 1);
vacc5x01 = vmla_lane_f32(vacc5x01, vb01c1, va5, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x2_t va0 = vld1_dup_f32(a0); a0 += 1;
const float32x2_t va1 = vld1_dup_f32(a1); a1 += 1;
const float32x2_t va2 = vld1_dup_f32(a2); a2 += 1;
const float32x2_t va3 = vld1_dup_f32(a3); a3 += 1;
const float32x2_t va4 = vld1_dup_f32(a4); a4 += 1;
const float32x2_t va5 = vld1_dup_f32(a5); a5 += 1;
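      // Tail k-step: load the last 2 int8 weights (one per output channel) and widen them to float.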
const uint16x4_t vtmpb = vld1_dup_u16(w); w = (const int8_t*) w + 2;
const int32x2_t vtmpi = vget_low_s32(vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_u16(vtmpb)))));
const float32x2_t vb01 = vcvt_f32_s32(vtmpi);
vacc0x01 = vmla_f32(vacc0x01, va0, vb01);
vacc1x01 = vmla_f32(vacc1x01, va1, vb01);
vacc2x01 = vmla_f32(vacc2x01, va2, vb01);
vacc3x01 = vmla_f32(vacc3x01, va3, vb01);
vacc4x01 = vmla_f32(vacc4x01, va4, vb01);
vacc5x01 = vmla_f32(vacc5x01, va5, vb01);
}
const float32x2_t vscale = vreinterpret_f32_u8(vld1_u8(w)); w = (const float*) w + 2;
vacc0x01 = vmul_f32(vacc0x01, vscale);
vacc1x01 = vmul_f32(vacc1x01, vscale);
vacc2x01 = vmul_f32(vacc2x01, vscale);
vacc3x01 = vmul_f32(vacc3x01, vscale);
vacc4x01 = vmul_f32(vacc4x01, vscale);
vacc5x01 = vmul_f32(vacc5x01, vscale);
    const float32x2_t vmax = vld1_dup_f32(&params->scalar.max);
vacc0x01 = vmin_f32(vacc0x01, vmax);
vacc1x01 = vmin_f32(vacc1x01, vmax);
vacc2x01 = vmin_f32(vacc2x01, vmax);
vacc3x01 = vmin_f32(vacc3x01, vmax);
vacc4x01 = vmin_f32(vacc4x01, vmax);
vacc5x01 = vmin_f32(vacc5x01, vmax);
    const float32x2_t vmin = vld1_dup_f32(&params->scalar.min);
vacc0x01 = vmax_f32(vacc0x01, vmin);
vacc1x01 = vmax_f32(vacc1x01, vmin);
vacc2x01 = vmax_f32(vacc2x01, vmin);
vacc3x01 = vmax_f32(vacc3x01, vmin);
vacc4x01 = vmax_f32(vacc4x01, vmin);
vacc5x01 = vmax_f32(vacc5x01, vmin);
if XNN_LIKELY(nc >= 2) {
vst1_f32(c0, vacc0x01);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
vst1_f32(c1, vacc1x01);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1_f32(c2, vacc2x01);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1_f32(c3, vacc3x01);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1_f32(c4, vacc4x01);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1_f32(c5, vacc5x01);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
a0 = (const float*) ((uintptr_t) a0 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a5 = (const float*) ((uintptr_t) a5 - kc);
nc -= 2;
} else {
assert(nc == 1);
vst1_lane_f32(c0, vacc0x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c5, vacc5x01, 0);
nc = 0;
}
} while (nc != 0);
}
| 6,544 | 34.570652 | 106 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-6x8-minmax-aarch64-neonfma-lane-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_6x8__aarch64_neonfma_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
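      // Each iteration consumes 4 k-steps: a 128-bit activation load per row and 4 groups of 8 int8 weights, each group widened to float before the lane-broadcast FMAs.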
const float32x4_t va0 = vld1q_f32(a0); a0 += 4;
const float32x4_t va1 = vld1q_f32(a1); a1 += 4;
const float32x4_t va2 = vld1q_f32(a2); a2 += 4;
const float32x4_t va3 = vld1q_f32(a3); a3 += 4;
const float32x4_t va4 = vld1q_f32(a4); a4 += 4;
const float32x4_t va5 = vld1q_f32(a5); a5 += 4;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, vget_low_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, vget_low_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, vget_low_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, vget_low_f32(va3), 0);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c0, vget_low_f32(va4), 0);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c0, vget_low_f32(va5), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, vget_low_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, vget_low_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, vget_low_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, vget_low_f32(va3), 0);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c0, vget_low_f32(va4), 0);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c0, vget_low_f32(va5), 0);
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, vget_low_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, vget_low_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, vget_low_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, vget_low_f32(va3), 1);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c1, vget_low_f32(va4), 1);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c1, vget_low_f32(va5), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, vget_low_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, vget_low_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, vget_low_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, vget_low_f32(va3), 1);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c1, vget_low_f32(va4), 1);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c1, vget_low_f32(va5), 1);
const int8x8_t vw01234567c2 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c2 = vmovl_s8(vw01234567c2);
const int32x4_t vxw0123c2 = vmovl_s16(vget_low_s16(vxw01234567c2));
const int32x4_t vxw4567c2 = vmovl_s16(vget_high_s16(vxw01234567c2));
const float32x4_t vb0123c2 = vcvtq_f32_s32(vxw0123c2);
const float32x4_t vb4567c2 = vcvtq_f32_s32(vxw4567c2);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c2, vget_high_f32(va0), 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c2, vget_high_f32(va1), 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c2, vget_high_f32(va2), 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c2, vget_high_f32(va3), 0);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c2, vget_high_f32(va4), 0);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c2, vget_high_f32(va5), 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c2, vget_high_f32(va0), 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c2, vget_high_f32(va1), 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c2, vget_high_f32(va2), 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c2, vget_high_f32(va3), 0);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c2, vget_high_f32(va4), 0);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c2, vget_high_f32(va5), 0);
const int8x8_t vw01234567c3 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c3 = vmovl_s8(vw01234567c3);
const int32x4_t vxw0123c3 = vmovl_s16(vget_low_s16(vxw01234567c3));
const int32x4_t vxw4567c3 = vmovl_s16(vget_high_s16(vxw01234567c3));
const float32x4_t vb0123c3 = vcvtq_f32_s32(vxw0123c3);
const float32x4_t vb4567c3 = vcvtq_f32_s32(vxw4567c3);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c3, vget_high_f32(va0), 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c3, vget_high_f32(va1), 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c3, vget_high_f32(va2), 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c3, vget_high_f32(va3), 1);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c3, vget_high_f32(va4), 1);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c3, vget_high_f32(va5), 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c3, vget_high_f32(va0), 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c3, vget_high_f32(va1), 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c3, vget_high_f32(va2), 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c3, vget_high_f32(va3), 1);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c3, vget_high_f32(va4), 1);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c3, vget_high_f32(va5), 1);
}
if XNN_UNLIKELY(k != 0) {
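      // Remainder: one k-step at a time with the activation broadcast across all four lanes.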
do {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const int8x8_t vw01234567 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567 = vmovl_s8(vw01234567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01234567));
const int32x4_t vxw4567 = vmovl_s16(vget_high_s16(vxw01234567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
k -= sizeof(float);
} while (k != 0);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = ((const float*) w + 4);
const float32x4_t vscale4567 = vld1q_f32(w); w = ((const float*) w + 4);
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc4x0123 = vmulq_f32(vacc4x0123, vscale0123);
vacc5x0123 = vmulq_f32(vacc5x0123, vscale0123);
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
vacc4x4567 = vmulq_f32(vacc4x4567, vscale4567);
vacc5x4567 = vmulq_f32(vacc5x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,547 | 43.218845 | 79 | c |
| XNNPACK | XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-6x8-minmax-aarch64-neonfma-lane-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_6x8__aarch64_neonfma_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
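      // 2 k-steps per iteration: two int8x8 weight loads (8 output channels each) are widened to float, then combined with lane 0 and lane 1 of the paired activations.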
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c0, va5, 0);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c0, va5, 0);
vacc0x0123 = vfmaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vfmaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vfmaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vfmaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vfmaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc5x0123 = vfmaq_lane_f32(vacc5x0123, vb0123c1, va5, 1);
vacc0x4567 = vfmaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vfmaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vfmaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vfmaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vfmaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
vacc5x4567 = vfmaq_lane_f32(vacc5x4567, vb4567c1, va5, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
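      // Tail k-step: the 8 int8 weights are loaded as two 4-byte halves, widened, and converted to float.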
const int8x8_t vw01230123 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int8x8_t vw45674567 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int16x8_t vxw01230123 = vmovl_s8(vw01230123);
const int16x8_t vxw45674567 = vmovl_s8(vw45674567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01230123));
const int32x4_t vxw4567 = vmovl_s16(vget_low_s16(vxw45674567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc4x0123 = vmulq_f32(vacc4x0123, vscale0123);
vacc5x0123 = vmulq_f32(vacc5x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
vacc4x4567 = vmulq_f32(vacc4x4567, vscale4567);
vacc5x4567 = vmulq_f32(vacc5x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
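A note for readers of these generated kernels: the standalone scalar sketch below is illustrative only and not part of XNNPACK; its packed-weight layout (per-tile float biases, then int8 weights in k-major groups of nr, then per-output-channel float scales) is an assumption inferred from the loads in the kernels, but it spells out the arithmetic every f32-qc8w GEMM minmax microkernel in this family performs.

// Illustrative scalar reference, NOT XNNPACK code. Names and the packed-weight
// layout are assumptions made for clarity; they mirror what the vector loads
// above read (bias, int8 weights in k-major groups of nr, per-column scales).
#include <stddef.h>
#include <stdint.h>

static void f32_qc8w_gemm_minmax_reference(
    size_t mr, size_t nr, size_t kc,
    const float* a, size_t a_stride,      // activations, row stride in floats
    const float* bias,                    // nr float biases
    const int8_t* w8,                     // kc x nr int8 weights, k-major
    const float* scale,                   // nr per-output-channel float scales
    float* c, size_t c_stride,            // outputs, row stride in floats
    float min, float max)
{
  for (size_t m = 0; m < mr; m++) {
    for (size_t n = 0; n < nr; n++) {
      float acc = bias[n];
      for (size_t k = 0; k < kc; k++) {
        // Weights are stored as int8 and converted to float before the MAC.
        acc += a[m * a_stride + k] * (float) w8[k * nr + n];
      }
      acc *= scale[n];                    // per-channel dequantization scale
      acc = acc < min ? min : acc;        // clamp to [min, max]
      acc = acc > max ? max : acc;
      c[m * c_stride + n] = acc;
    }
  }
}

The vector kernels differ only in how many rows (mr) and output columns (8 or 16) they process per step and in converting several weights at a time.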
file_length: 11,526 | avg_line_length: 39.588028 | max_line_length: 98 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-6x8-minmax-avx2-broadcast.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_6x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
__m256 vacc0x01234567 = _mm256_loadu_ps((const float*) w + 0);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc5x01234567 = vacc0x01234567;
w = (const float*) w + 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 va5 = _mm256_broadcast_ss(a5);
a5 += 1;
const __m256i vbi01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const void*) w));
const __m256 vb01234567 = _mm256_cvtepi32_ps(vbi01234567);
w = (const int8_t*) w + 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
k -= sizeof(float);
} while (k != 0);
const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w + 0);
vacc0x01234567 = _mm256_mul_ps(vacc0x01234567, vscale01234567);
vacc1x01234567 = _mm256_mul_ps(vacc1x01234567, vscale01234567);
vacc2x01234567 = _mm256_mul_ps(vacc2x01234567, vscale01234567);
vacc3x01234567 = _mm256_mul_ps(vacc3x01234567, vscale01234567);
vacc4x01234567 = _mm256_mul_ps(vacc4x01234567, vscale01234567);
vacc5x01234567 = _mm256_mul_ps(vacc5x01234567, vscale01234567);
w = (const float*) w + 8;
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c5, vacc5x01234567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c5, vacc5x0123);
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c5, vacc5x0123);
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c5, vacc5x0123);
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
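The inner loop of the AVX2 kernel above hinges on one idiom: widening eight signed int8 weights to floats so they can feed _mm256_fmadd_ps. A self-contained sketch of just that step (illustrative helper, not an XNNPACK function; requires AVX2):

// Load 8 int8 weights and convert them to 8 floats, as done per k step above.
// Illustrative helper, not part of XNNPACK.
#include <immintrin.h>
#include <stdint.h>

static inline __m256 load_8_int8_weights_as_f32(const int8_t* w) {
  const __m128i vw8  = _mm_loadl_epi64((const __m128i*) w);  // low 64 bits: 8 x int8
  const __m256i vw32 = _mm256_cvtepi8_epi32(vw8);            // sign-extend to 8 x int32
  return _mm256_cvtepi32_ps(vw32);                           // 8 x float
}

The generated kernel additionally relies on FMA3 for _mm256_fmadd_ps, and reads the per-channel float scales only after the int8 weight block has been consumed.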
file_length: 7,703 | avg_line_length: 34.018182 | max_line_length: 89 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-6x8-minmax-neon-dup-ld64.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_6x8__neon_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);
const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c0, vb0123c0);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c0, vb0123c0);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c0, vb4567c0);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
const float32x4_t va4c1 = vdupq_lane_f32(va4, 1);
const float32x4_t va5c1 = vdupq_lane_f32(va5, 1);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4c1, vb0123c1);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5c1, vb0123c1);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4c1, vb4567c1);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const int8x8_t vw01230123 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int8x8_t vw45674567 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int16x8_t vxw01230123 = vmovl_s8(vw01230123);
const int16x8_t vxw45674567 = vmovl_s8(vw45674567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01230123));
const int32x4_t vxw4567 = vmovl_s16(vget_low_s16(vxw45674567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc4x0123 = vmulq_f32(vacc4x0123, vscale0123);
vacc5x0123 = vmulq_f32(vacc5x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
vacc4x4567 = vmulq_f32(vacc4x4567, vscale4567);
vacc5x4567 = vmulq_f32(vacc5x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
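The NEON variants use the same dequantize-on-load idea, split across two quad registers per k step; a standalone sketch of the widening chain (illustrative helper, not part of XNNPACK):

// Load 8 int8 weights and widen them to two float32x4_t vectors, as done per
// k step in the NEON kernels above. Illustrative helper, not part of XNNPACK.
#include <arm_neon.h>
#include <stdint.h>

static inline void load_8_int8_weights_as_f32(const int8_t* w,
                                              float32x4_t* lo, float32x4_t* hi) {
  const int8x8_t  vw8  = vld1_s8(w);                   // 8 x int8
  const int16x8_t vw16 = vmovl_s8(vw8);                // sign-extend to 8 x int16
  *lo = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vw16)));  // columns 0..3 as float
  *hi = vcvtq_f32_s32(vmovl_s16(vget_high_s16(vw16))); // columns 4..7 as float
}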
file_length: 12,042 | avg_line_length: 39.685811 | max_line_length: 98 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-6x8-minmax-neon-lane-ld64.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_6x8__neon_lane_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c0, va0, 0);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c0, va1, 0);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c0, va2, 0);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c0, va3, 0);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c0, va4, 0);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c0, va5, 0);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c0, va0, 0);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c0, va1, 0);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c0, va2, 0);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c0, va3, 0);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c0, va4, 0);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c0, va5, 0);
vacc0x0123 = vmlaq_lane_f32(vacc0x0123, vb0123c1, va0, 1);
vacc1x0123 = vmlaq_lane_f32(vacc1x0123, vb0123c1, va1, 1);
vacc2x0123 = vmlaq_lane_f32(vacc2x0123, vb0123c1, va2, 1);
vacc3x0123 = vmlaq_lane_f32(vacc3x0123, vb0123c1, va3, 1);
vacc4x0123 = vmlaq_lane_f32(vacc4x0123, vb0123c1, va4, 1);
vacc5x0123 = vmlaq_lane_f32(vacc5x0123, vb0123c1, va5, 1);
vacc0x4567 = vmlaq_lane_f32(vacc0x4567, vb4567c1, va0, 1);
vacc1x4567 = vmlaq_lane_f32(vacc1x4567, vb4567c1, va1, 1);
vacc2x4567 = vmlaq_lane_f32(vacc2x4567, vb4567c1, va2, 1);
vacc3x4567 = vmlaq_lane_f32(vacc3x4567, vb4567c1, va3, 1);
vacc4x4567 = vmlaq_lane_f32(vacc4x4567, vb4567c1, va4, 1);
vacc5x4567 = vmlaq_lane_f32(vacc5x4567, vb4567c1, va5, 1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const int8x8_t vw01230123 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int8x8_t vw45674567 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int16x8_t vxw01230123 = vmovl_s8(vw01230123);
const int16x8_t vxw45674567 = vmovl_s8(vw45674567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01230123));
const int32x4_t vxw4567 = vmovl_s16(vget_low_s16(vxw45674567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc4x0123 = vmulq_f32(vacc4x0123, vscale0123);
vacc5x0123 = vmulq_f32(vacc5x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
vacc4x4567 = vmulq_f32(vacc4x4567, vscale4567);
vacc5x4567 = vmulq_f32(vacc5x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 11,515 | avg_line_length: 39.549296 | max_line_length: 98 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-6x8-minmax-neonfma-dup-ld64.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/neon-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_6x8__neonfma_dup_ld64(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
a5 = a4;
c5 = c4;
}
do {
float32x4_t vacc0x0123 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc0x4567 = vld1q_f32(w); w = (const float*) w + 4;
float32x4_t vacc1x0123 = vacc0x0123;
float32x4_t vacc1x4567 = vacc0x4567;
float32x4_t vacc2x0123 = vacc0x0123;
float32x4_t vacc2x4567 = vacc0x4567;
float32x4_t vacc3x0123 = vacc0x0123;
float32x4_t vacc3x4567 = vacc0x4567;
float32x4_t vacc4x0123 = vacc0x0123;
float32x4_t vacc4x4567 = vacc0x4567;
float32x4_t vacc5x0123 = vacc0x0123;
float32x4_t vacc5x4567 = vacc0x4567;
size_t k = kc;
for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
const float32x2_t va0 = vld1_f32(a0); a0 += 2;
const float32x2_t va1 = vld1_f32(a1); a1 += 2;
const float32x2_t va2 = vld1_f32(a2); a2 += 2;
const float32x2_t va3 = vld1_f32(a3); a3 += 2;
const float32x2_t va4 = vld1_f32(a4); a4 += 2;
const float32x2_t va5 = vld1_f32(a5); a5 += 2;
const int8x8_t vw01234567c0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vw01234567c1 = vld1_s8(w); w = (const int8_t*) w + 8;
const int16x8_t vxw01234567c0 = vmovl_s8(vw01234567c0);
const int16x8_t vxw01234567c1 = vmovl_s8(vw01234567c1);
const int32x4_t vxw0123c0 = vmovl_s16(vget_low_s16(vxw01234567c0));
const int32x4_t vxw4567c0 = vmovl_s16(vget_high_s16(vxw01234567c0));
const int32x4_t vxw0123c1 = vmovl_s16(vget_low_s16(vxw01234567c1));
const int32x4_t vxw4567c1 = vmovl_s16(vget_high_s16(vxw01234567c1));
const float32x4_t vb0123c0 = vcvtq_f32_s32(vxw0123c0);
const float32x4_t vb0123c1 = vcvtq_f32_s32(vxw0123c1);
const float32x4_t vb4567c0 = vcvtq_f32_s32(vxw4567c0);
const float32x4_t vb4567c1 = vcvtq_f32_s32(vxw4567c1);
const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);
const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c0, vb0123c0);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c0, vb0123c0);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c0, vb0123c0);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c0, vb0123c0);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c0, vb0123c0);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c0, vb4567c0);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c0, vb4567c0);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c0, vb4567c0);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c0, vb4567c0);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c0, vb4567c0);
const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
const float32x4_t va4c1 = vdupq_lane_f32(va4, 1);
const float32x4_t va5c1 = vdupq_lane_f32(va5, 1);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1c1, vb0123c1);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2c1, vb0123c1);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3c1, vb0123c1);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4c1, vb0123c1);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5c1, vb0123c1);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1c1, vb4567c1);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2c1, vb4567c1);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3c1, vb4567c1);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4c1, vb4567c1);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5c1, vb4567c1);
}
if XNN_UNLIKELY(k != 0) {
const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
const int8x8_t vw01230123 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int8x8_t vw45674567 = vreinterpret_s8_u32(vld1_dup_u32(w)); w = (const int8_t*) w + 4;
const int16x8_t vxw01230123 = vmovl_s8(vw01230123);
const int16x8_t vxw45674567 = vmovl_s8(vw45674567);
const int32x4_t vxw0123 = vmovl_s16(vget_low_s16(vxw01230123));
const int32x4_t vxw4567 = vmovl_s16(vget_low_s16(vxw45674567));
const float32x4_t vb0123 = vcvtq_f32_s32(vxw0123);
const float32x4_t vb4567 = vcvtq_f32_s32(vxw4567);
vacc0x0123 = vfmaq_f32(vacc0x0123, va0, vb0123);
vacc1x0123 = vfmaq_f32(vacc1x0123, va1, vb0123);
vacc2x0123 = vfmaq_f32(vacc2x0123, va2, vb0123);
vacc3x0123 = vfmaq_f32(vacc3x0123, va3, vb0123);
vacc4x0123 = vfmaq_f32(vacc4x0123, va4, vb0123);
vacc5x0123 = vfmaq_f32(vacc5x0123, va5, vb0123);
vacc0x4567 = vfmaq_f32(vacc0x4567, va0, vb4567);
vacc1x4567 = vfmaq_f32(vacc1x4567, va1, vb4567);
vacc2x4567 = vfmaq_f32(vacc2x4567, va2, vb4567);
vacc3x4567 = vfmaq_f32(vacc3x4567, va3, vb4567);
vacc4x4567 = vfmaq_f32(vacc4x4567, va4, vb4567);
vacc5x4567 = vfmaq_f32(vacc5x4567, va5, vb4567);
}
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
vacc2x0123 = vmulq_f32(vacc2x0123, vscale0123);
vacc3x0123 = vmulq_f32(vacc3x0123, vscale0123);
vacc4x0123 = vmulq_f32(vacc4x0123, vscale0123);
vacc5x0123 = vmulq_f32(vacc5x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);
vacc2x4567 = vmulq_f32(vacc2x4567, vscale4567);
vacc3x4567 = vmulq_f32(vacc3x4567, vscale4567);
vacc4x4567 = vmulq_f32(vacc4x4567, vscale4567);
vacc5x4567 = vmulq_f32(vacc5x4567, vscale4567);
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
vacc4x0123 = vminq_f32(vacc4x0123, vmax);
vacc5x0123 = vminq_f32(vacc5x0123, vmax);
vacc0x4567 = vminq_f32(vacc0x4567, vmax);
vacc1x4567 = vminq_f32(vacc1x4567, vmax);
vacc2x4567 = vminq_f32(vacc2x4567, vmax);
vacc3x4567 = vminq_f32(vacc3x4567, vmax);
vacc4x4567 = vminq_f32(vacc4x4567, vmax);
vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
if XNN_LIKELY(nc >= 8) {
vst1q_f32(c5, vacc5x0123);
vst1q_f32(c5 + 4, vacc5x4567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
vst1q_f32(c4, vacc4x0123);
vst1q_f32(c4 + 4, vacc4x4567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
vst1q_f32(c3, vacc3x0123);
vst1q_f32(c3 + 4, vacc3x4567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
vst1q_f32(c2, vacc2x0123);
vst1q_f32(c2 + 4, vacc2x4567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
vst1q_f32(c1, vacc1x0123);
vst1q_f32(c1 + 4, vacc1x4567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
vst1q_f32(c0, vacc0x0123);
vst1q_f32(c0 + 4, vacc0x4567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1q_f32(c5, vacc5x0123); c5 += 4;
vst1q_f32(c4, vacc4x0123); c4 += 4;
vst1q_f32(c3, vacc3x0123); c3 += 4;
vst1q_f32(c2, vacc2x0123); c2 += 4;
vst1q_f32(c1, vacc1x0123); c1 += 4;
vst1q_f32(c0, vacc0x0123); c0 += 4;
vacc5x0123 = vacc5x4567;
vacc4x0123 = vacc4x4567;
vacc3x0123 = vacc3x4567;
vacc2x0123 = vacc2x4567;
vacc1x0123 = vacc1x4567;
vacc0x0123 = vacc0x4567;
}
float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
if (nc & 2) {
vst1_f32(c5, vacc5x01); c5 += 2;
vst1_f32(c4, vacc4x01); c4 += 2;
vst1_f32(c3, vacc3x01); c3 += 2;
vst1_f32(c2, vacc2x01); c2 += 2;
vst1_f32(c1, vacc1x01); c1 += 2;
vst1_f32(c0, vacc0x01); c0 += 2;
vacc5x01 = vget_high_f32(vacc5x0123);
vacc4x01 = vget_high_f32(vacc4x0123);
vacc3x01 = vget_high_f32(vacc3x0123);
vacc2x01 = vget_high_f32(vacc2x0123);
vacc1x01 = vget_high_f32(vacc1x0123);
vacc0x01 = vget_high_f32(vacc0x0123);
}
if (nc & 1) {
vst1_lane_f32(c5, vacc5x01, 0);
vst1_lane_f32(c4, vacc4x01, 0);
vst1_lane_f32(c3, vacc3x01, 0);
vst1_lane_f32(c2, vacc2x01, 0);
vst1_lane_f32(c1, vacc1x01, 0);
vst1_lane_f32(c0, vacc0x01, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 12,045 | avg_line_length: 39.695946 | max_line_length: 98 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-7x16-minmax-avx512skx-broadcast.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_7x16__avx512skx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 7);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
a5 = a4;
c5 = c4;
}
const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
a6 = a5;
c6 = c5;
}
do {
__m512 vacc0x0123456789ABCDEF = _mm512_loadu_ps(w);
__m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc6x0123456789ABCDEF = vacc0x0123456789ABCDEF;
w = (const float*) w + 16;
size_t k = kc;
do {
const __m512i vbi0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_epi8(w));
const __m512 vb0123456789ABCDEF = _mm512_cvtepi32_ps(vbi0123456789ABCDEF);
w = (const int8_t*) w + 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
const __m512 va4 = _mm512_set1_ps(*a4);
vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
const __m512 va5 = _mm512_set1_ps(*a5);
vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
const __m512 va6 = _mm512_set1_ps(*a6);
vacc6x0123456789ABCDEF = _mm512_fmadd_ps(va6, vb0123456789ABCDEF, vacc6x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
a4 += 1;
a5 += 1;
a6 += 1;
k -= sizeof(float);
} while (k != 0);
const __m512 vscale0123456789ABCDEF = _mm512_loadu_ps((const float*) w + 0);
vacc0x0123456789ABCDEF = _mm512_mul_ps(vacc0x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_mul_ps(vacc1x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_mul_ps(vacc2x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_mul_ps(vacc3x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_mul_ps(vacc4x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_mul_ps(vacc5x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc6x0123456789ABCDEF = _mm512_mul_ps(vacc6x0123456789ABCDEF, vscale0123456789ABCDEF);
w = (const float*) w + 16;
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_max_ps(vmin, vacc5x0123456789ABCDEF);
vacc6x0123456789ABCDEF = _mm512_max_ps(vmin, vacc6x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_min_ps(vmax, vacc5x0123456789ABCDEF);
vacc6x0123456789ABCDEF = _mm512_min_ps(vmax, vacc6x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c6, vacc6x0123456789ABCDEF);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
_mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a6 = (const float*) ((uintptr_t) a6 - kc);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c6, vmask, vacc6x0123456789ABCDEF);
_mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
_mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
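Worth noting in the AVX-512 kernels: the nc & 15 branch handles the column remainder with a write mask rather than scalar stores. A minimal standalone sketch of that tail store (illustrative, not XNNPACK code):

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

// Store only the first nc floats of v to c (assumes 1 <= nc <= 15, matching
// the `nc & 15` branch above). Illustrative helper, not part of XNNPACK.
static inline void store_f32_tail(float* c, __m512 v, size_t nc) {
  const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << nc) - UINT32_C(1)));
  _mm512_mask_storeu_ps(c, vmask, v);
}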
file_length: 7,634 | avg_line_length: 38.973822 | max_line_length: 106 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-7x8-minmax-avx2-broadcast.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_7x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 7);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
a5 = a4;
c5 = c4;
}
const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
a6 = a5;
c6 = c5;
}
do {
__m256 vacc0x01234567 = _mm256_loadu_ps((const float*) w + 0);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc5x01234567 = vacc0x01234567;
__m256 vacc6x01234567 = vacc0x01234567;
w = (const float*) w + 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 va5 = _mm256_broadcast_ss(a5);
a5 += 1;
const __m256 va6 = _mm256_broadcast_ss(a6);
a6 += 1;
const __m256i vbi01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const void*) w));
const __m256 vb01234567 = _mm256_cvtepi32_ps(vbi01234567);
w = (const int8_t*) w + 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);
k -= sizeof(float);
} while (k != 0);
const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w + 0);
vacc0x01234567 = _mm256_mul_ps(vacc0x01234567, vscale01234567);
vacc1x01234567 = _mm256_mul_ps(vacc1x01234567, vscale01234567);
vacc2x01234567 = _mm256_mul_ps(vacc2x01234567, vscale01234567);
vacc3x01234567 = _mm256_mul_ps(vacc3x01234567, vscale01234567);
vacc4x01234567 = _mm256_mul_ps(vacc4x01234567, vscale01234567);
vacc5x01234567 = _mm256_mul_ps(vacc5x01234567, vscale01234567);
vacc6x01234567 = _mm256_mul_ps(vacc6x01234567, vscale01234567);
w = (const float*) w + 8;
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
vacc6x01234567 = _mm256_max_ps(vmin, vacc6x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
vacc6x01234567 = _mm256_min_ps(vmax, vacc6x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c6, vacc6x01234567);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
_mm256_storeu_ps(c5, vacc5x01234567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a6 = (const float*) ((uintptr_t) a6 - kc);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
__m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
__m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c6, vacc6x0123);
_mm_storeu_ps(c5, vacc5x0123);
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c6 += 4;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c6, vacc6x0123);
_mm_storel_pi((__m64*) c5, vacc5x0123);
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c6 += 2;
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c6, vacc6x0123);
_mm_store_ss(c5, vacc5x0123);
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 8,740 | avg_line_length: 34.82377 | max_line_length: 89 | extension_type: c
repo: XNNPACK | file: XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-8x16-minmax-avx512skx-broadcast.c
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx512-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_8x16__avx512skx_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 8);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
a5 = a4;
c5 = c4;
}
const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
a6 = a5;
c6 = c5;
}
const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
if XNN_UNPREDICTABLE(mr != 8) {
a7 = a6;
c7 = c6;
}
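  // Packed weights are consumed below in 16-column blocks laid out as
  // 16 float biases, kc groups of 16 int8 weights, and 16 float
  // per-output-channel scales.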
do {
__m512 vacc0x0123456789ABCDEF = _mm512_loadu_ps(w);
__m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc6x0123456789ABCDEF = vacc0x0123456789ABCDEF;
__m512 vacc7x0123456789ABCDEF = vacc0x0123456789ABCDEF;
w = (const float*) w + 16;
size_t k = kc;
do {
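      // Sign-extend 16 int8 weights to 32-bit integers and convert them to
      // float; the per-channel scales are applied once after the k loop.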
const __m512i vbi0123456789ABCDEF = _mm512_cvtepi8_epi32(_mm_loadu_epi8(w));
const __m512 vb0123456789ABCDEF = _mm512_cvtepi32_ps(vbi0123456789ABCDEF);
w = (const int8_t*) w + 16;
const __m512 va0 = _mm512_set1_ps(*a0);
vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
const __m512 va1 = _mm512_set1_ps(*a1);
vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
const __m512 va2 = _mm512_set1_ps(*a2);
vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
const __m512 va3 = _mm512_set1_ps(*a3);
vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
const __m512 va4 = _mm512_set1_ps(*a4);
vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
const __m512 va5 = _mm512_set1_ps(*a5);
vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
const __m512 va6 = _mm512_set1_ps(*a6);
vacc6x0123456789ABCDEF = _mm512_fmadd_ps(va6, vb0123456789ABCDEF, vacc6x0123456789ABCDEF);
const __m512 va7 = _mm512_set1_ps(*a7);
vacc7x0123456789ABCDEF = _mm512_fmadd_ps(va7, vb0123456789ABCDEF, vacc7x0123456789ABCDEF);
a0 += 1;
a1 += 1;
a2 += 1;
a3 += 1;
a4 += 1;
a5 += 1;
a6 += 1;
a7 += 1;
k -= sizeof(float);
} while (k != 0);
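    // Dequantize: multiply each accumulator by the per-output-channel scales
    // stored immediately after the int8 weights, then clamp to [min, max].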
const __m512 vscale0123456789ABCDEF = _mm512_loadu_ps((const float*) w + 0);
vacc0x0123456789ABCDEF = _mm512_mul_ps(vacc0x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_mul_ps(vacc1x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_mul_ps(vacc2x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_mul_ps(vacc3x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_mul_ps(vacc4x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_mul_ps(vacc5x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc6x0123456789ABCDEF = _mm512_mul_ps(vacc6x0123456789ABCDEF, vscale0123456789ABCDEF);
vacc7x0123456789ABCDEF = _mm512_mul_ps(vacc7x0123456789ABCDEF, vscale0123456789ABCDEF);
w = (const float*) w + 16;
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
vacc0x0123456789ABCDEF = _mm512_max_ps(vmin, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_max_ps(vmin, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_max_ps(vmin, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_max_ps(vmin, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_max_ps(vmin, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_max_ps(vmin, vacc5x0123456789ABCDEF);
vacc6x0123456789ABCDEF = _mm512_max_ps(vmin, vacc6x0123456789ABCDEF);
vacc7x0123456789ABCDEF = _mm512_max_ps(vmin, vacc7x0123456789ABCDEF);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
vacc0x0123456789ABCDEF = _mm512_min_ps(vmax, vacc0x0123456789ABCDEF);
vacc1x0123456789ABCDEF = _mm512_min_ps(vmax, vacc1x0123456789ABCDEF);
vacc2x0123456789ABCDEF = _mm512_min_ps(vmax, vacc2x0123456789ABCDEF);
vacc3x0123456789ABCDEF = _mm512_min_ps(vmax, vacc3x0123456789ABCDEF);
vacc4x0123456789ABCDEF = _mm512_min_ps(vmax, vacc4x0123456789ABCDEF);
vacc5x0123456789ABCDEF = _mm512_min_ps(vmax, vacc5x0123456789ABCDEF);
vacc6x0123456789ABCDEF = _mm512_min_ps(vmax, vacc6x0123456789ABCDEF);
vacc7x0123456789ABCDEF = _mm512_min_ps(vmax, vacc7x0123456789ABCDEF);
if XNN_LIKELY(nc >= 16) {
_mm512_storeu_ps(c7, vacc7x0123456789ABCDEF);
c7 = (float*) ((uintptr_t) c7 + cn_stride);
_mm512_storeu_ps(c6, vacc6x0123456789ABCDEF);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
_mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a7 = (const float*) ((uintptr_t) a7 - kc);
a6 = (const float*) ((uintptr_t) a6 - kc);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
if (nc & 15) {
// Prepare mask for valid 32-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
_mm512_mask_storeu_ps(c7, vmask, vacc7x0123456789ABCDEF);
_mm512_mask_storeu_ps(c6, vmask, vacc6x0123456789ABCDEF);
_mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
_mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
_mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
_mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
_mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
_mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
}
nc = 0;
}
} while (nc != 0);
}
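
// Editor-added illustrative sketch, not part of XNNPACK: a scalar reference of
// the per-(row, channel) computation that the kernel above vectorizes, assuming
// the same packed block layout (NR float biases, kc groups of NR int8 weights,
// NR float scales). The helper name and its parameters are hypothetical.
static inline float qc8w_gemm_reference_element(
    const float* a,       // kc_elements activations for one output row
    const float* bias,    // NR per-channel biases
    const int8_t* wq,     // kc_elements x NR channel-interleaved int8 weights
    const float* scale,   // NR per-channel dequantization scales
    size_t kc_elements, size_t nr, size_t channel, float vmin, float vmax)
{
  float acc = bias[channel];
  for (size_t k = 0; k < kc_elements; k++) {
    acc += a[k] * (float) wq[k * nr + channel];
  }
  acc *= scale[channel];
  acc = acc < vmin ? vmin : acc;  // clamp below, as _mm512_max_ps(vmin, acc)
  acc = acc > vmax ? vmax : acc;  // clamp above, as _mm512_min_ps(vmax, acc)
  return acc;
}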
| 8,490 | 39.822115 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-gemm/gen/f32-qc8w-gemm-8x8-minmax-avx2-broadcast.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-gemm/avx-broadcast.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xnnpack/gemm.h>
void xnn_f32_qc8w_gemm_minmax_ukernel_8x8__avx2_broadcast(
size_t mr,
size_t nc,
size_t kc,
const float* restrict a,
size_t a_stride,
const void* restrict w,
float* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 8);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(float) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const float* a0 = a;
float* c0 = c;
const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
a3 = a2;
c3 = c2;
}
const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
a4 = a3;
c4 = c3;
}
const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr < 6) {
a5 = a4;
c5 = c4;
}
const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 6) {
a6 = a5;
c6 = c5;
}
const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
if XNN_UNPREDICTABLE(mr != 8) {
a7 = a6;
c7 = c6;
}
do {
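    // Packed weights come in 8-column blocks: 8 float biases, followed by
    // kc groups of 8 int8 weights and 8 float per-output-channel scales.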
__m256 vacc0x01234567 = _mm256_loadu_ps((const float*) w + 0);
__m256 vacc1x01234567 = vacc0x01234567;
__m256 vacc2x01234567 = vacc0x01234567;
__m256 vacc3x01234567 = vacc0x01234567;
__m256 vacc4x01234567 = vacc0x01234567;
__m256 vacc5x01234567 = vacc0x01234567;
__m256 vacc6x01234567 = vacc0x01234567;
__m256 vacc7x01234567 = vacc0x01234567;
w = (const float*) w + 8;
size_t k = kc;
do {
const __m256 va0 = _mm256_broadcast_ss(a0);
a0 += 1;
const __m256 va1 = _mm256_broadcast_ss(a1);
a1 += 1;
const __m256 va2 = _mm256_broadcast_ss(a2);
a2 += 1;
const __m256 va3 = _mm256_broadcast_ss(a3);
a3 += 1;
const __m256 va4 = _mm256_broadcast_ss(a4);
a4 += 1;
const __m256 va5 = _mm256_broadcast_ss(a5);
a5 += 1;
const __m256 va6 = _mm256_broadcast_ss(a6);
a6 += 1;
const __m256 va7 = _mm256_broadcast_ss(a7);
a7 += 1;
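      // Load 8 int8 weights (64 bits), sign-extend them to int32, and convert
      // to float for the FMAs below; scales are applied after the k loop.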
const __m256i vbi01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const void*) w));
const __m256 vb01234567 = _mm256_cvtepi32_ps(vbi01234567);
w = (const int8_t*) w + 8;
vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);
vacc7x01234567 = _mm256_fmadd_ps(va7, vb01234567, vacc7x01234567);
k -= sizeof(float);
} while (k != 0);
const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w + 0);
vacc0x01234567 = _mm256_mul_ps(vacc0x01234567, vscale01234567);
vacc1x01234567 = _mm256_mul_ps(vacc1x01234567, vscale01234567);
vacc2x01234567 = _mm256_mul_ps(vacc2x01234567, vscale01234567);
vacc3x01234567 = _mm256_mul_ps(vacc3x01234567, vscale01234567);
vacc4x01234567 = _mm256_mul_ps(vacc4x01234567, vscale01234567);
vacc5x01234567 = _mm256_mul_ps(vacc5x01234567, vscale01234567);
vacc6x01234567 = _mm256_mul_ps(vacc6x01234567, vscale01234567);
vacc7x01234567 = _mm256_mul_ps(vacc7x01234567, vscale01234567);
w = (const float*) w + 8;
const __m256 vmin = _mm256_load_ps(params->avx.min);
vacc0x01234567 = _mm256_max_ps(vmin, vacc0x01234567);
vacc1x01234567 = _mm256_max_ps(vmin, vacc1x01234567);
vacc2x01234567 = _mm256_max_ps(vmin, vacc2x01234567);
vacc3x01234567 = _mm256_max_ps(vmin, vacc3x01234567);
vacc4x01234567 = _mm256_max_ps(vmin, vacc4x01234567);
vacc5x01234567 = _mm256_max_ps(vmin, vacc5x01234567);
vacc6x01234567 = _mm256_max_ps(vmin, vacc6x01234567);
vacc7x01234567 = _mm256_max_ps(vmin, vacc7x01234567);
const __m256 vmax = _mm256_load_ps(params->avx.max);
vacc0x01234567 = _mm256_min_ps(vmax, vacc0x01234567);
vacc1x01234567 = _mm256_min_ps(vmax, vacc1x01234567);
vacc2x01234567 = _mm256_min_ps(vmax, vacc2x01234567);
vacc3x01234567 = _mm256_min_ps(vmax, vacc3x01234567);
vacc4x01234567 = _mm256_min_ps(vmax, vacc4x01234567);
vacc5x01234567 = _mm256_min_ps(vmax, vacc5x01234567);
vacc6x01234567 = _mm256_min_ps(vmax, vacc6x01234567);
vacc7x01234567 = _mm256_min_ps(vmax, vacc7x01234567);
if XNN_LIKELY(nc >= 8) {
_mm256_storeu_ps(c7, vacc7x01234567);
c7 = (float*) ((uintptr_t) c7 + cn_stride);
_mm256_storeu_ps(c6, vacc6x01234567);
c6 = (float*) ((uintptr_t) c6 + cn_stride);
_mm256_storeu_ps(c5, vacc5x01234567);
c5 = (float*) ((uintptr_t) c5 + cn_stride);
_mm256_storeu_ps(c4, vacc4x01234567);
c4 = (float*) ((uintptr_t) c4 + cn_stride);
_mm256_storeu_ps(c3, vacc3x01234567);
c3 = (float*) ((uintptr_t) c3 + cn_stride);
_mm256_storeu_ps(c2, vacc2x01234567);
c2 = (float*) ((uintptr_t) c2 + cn_stride);
_mm256_storeu_ps(c1, vacc1x01234567);
c1 = (float*) ((uintptr_t) c1 + cn_stride);
_mm256_storeu_ps(c0, vacc0x01234567);
c0 = (float*) ((uintptr_t) c0 + cn_stride);
a7 = (const float*) ((uintptr_t) a7 - kc);
a6 = (const float*) ((uintptr_t) a6 - kc);
a5 = (const float*) ((uintptr_t) a5 - kc);
a4 = (const float*) ((uintptr_t) a4 - kc);
a3 = (const float*) ((uintptr_t) a3 - kc);
a2 = (const float*) ((uintptr_t) a2 - kc);
a1 = (const float*) ((uintptr_t) a1 - kc);
a0 = (const float*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
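      // Handle the nc < 8 remainder by storing 4, 2, and then 1 column at a
      // time, according to the bits of nc.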
__m128 vacc7x0123 = _mm256_castps256_ps128(vacc7x01234567);
__m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
__m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
__m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
__m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
__m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
__m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
__m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
if (nc & 4) {
_mm_storeu_ps(c7, vacc7x0123);
_mm_storeu_ps(c6, vacc6x0123);
_mm_storeu_ps(c5, vacc5x0123);
_mm_storeu_ps(c4, vacc4x0123);
_mm_storeu_ps(c3, vacc3x0123);
_mm_storeu_ps(c2, vacc2x0123);
_mm_storeu_ps(c1, vacc1x0123);
_mm_storeu_ps(c0, vacc0x0123);
vacc7x0123 = _mm256_extractf128_ps(vacc7x01234567, 1);
vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
c7 += 4;
c6 += 4;
c5 += 4;
c4 += 4;
c3 += 4;
c2 += 4;
c1 += 4;
c0 += 4;
}
if (nc & 2) {
_mm_storel_pi((__m64*) c7, vacc7x0123);
_mm_storel_pi((__m64*) c6, vacc6x0123);
_mm_storel_pi((__m64*) c5, vacc5x0123);
_mm_storel_pi((__m64*) c4, vacc4x0123);
_mm_storel_pi((__m64*) c3, vacc3x0123);
_mm_storel_pi((__m64*) c2, vacc2x0123);
_mm_storel_pi((__m64*) c1, vacc1x0123);
_mm_storel_pi((__m64*) c0, vacc0x0123);
vacc7x0123 = _mm_movehl_ps(vacc7x0123, vacc7x0123);
vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
c7 += 2;
c6 += 2;
c5 += 2;
c4 += 2;
c3 += 2;
c2 += 2;
c1 += 2;
c0 += 2;
}
if (nc & 1) {
_mm_store_ss(c7, vacc7x0123);
_mm_store_ss(c6, vacc6x0123);
_mm_store_ss(c5, vacc5x0123);
_mm_store_ss(c4, vacc4x0123);
_mm_store_ss(c3, vacc3x0123);
_mm_store_ss(c2, vacc2x0123);
_mm_store_ss(c1, vacc1x0123);
_mm_store_ss(c0, vacc0x0123);
}
nc = 0;
}
} while (nc != 0);
}
| 9,778 | 35.488806 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-spmm/gen/f32-qc8w-spmm-1x1-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
void xnn_f32_qc8w_spmm_minmax_ukernel_1x1__scalar(
size_t mc,
size_t nc,
const float* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 1 * sizeof(float);
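  // Per output channel, the packed sparse weight stream is one float bias,
  // nnz int8 weights, and one float scale; widx_dmap supplies the byte offset
  // from each used input element to the next.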
while (mc >= 1 * sizeof(float)) {
const void* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0x0 += vi0 * vw0;
} while (--nnz != 0);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
vacc0x0 *= vscale0;
float vout0x0 = math_min_f32(vacc0x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
output[0] = vout0x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = unaligned_load_f32(w);
w = (const float*) w + 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0 += vi0 * vw;
} while (--nnz != 0);
}
float vscale = unaligned_load_f32(w);
w = (const float*) w + 1;
vacc0 *= vscale;
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
mc -= 1 * sizeof(float);
}
}
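
// Editor-added illustrative sketch, not part of XNNPACK: the per-(element,
// channel) computation performed by the sparse kernel above, written out for a
// single input element. The helper name and signature are hypothetical; the
// packed stream layout (bias, nnz int8 weights, scale) matches the loop above.
static inline float qc8w_spmm_reference_element(
    const float* input,    // dense input, walked via the byte-offset diffs
    const int32_t* diffs,  // nnz byte offsets from widx_dmap
    const int8_t* wq,      // nnz int8 weights for this output channel
    float bias, float scale, uint32_t nnz, float vmin, float vmax)
{
  float acc = bias;
  for (uint32_t i = 0; i < nnz; i++) {
    acc += input[0] * (float) wq[i];
    const intptr_t diff = diffs[i];
    input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
  }
  acc *= scale;
  acc = acc > vmax ? vmax : acc;  // math_min_f32(acc, vmax)
  acc = acc < vmin ? vmin : acc;  // math_max_f32(acc, vmin)
  return acc;
}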
| 2,994 | 30.197917 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-spmm/gen/f32-qc8w-spmm-2x1-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
void xnn_f32_qc8w_spmm_minmax_ukernel_2x1__scalar(
size_t mc,
size_t nc,
const float* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 2 * sizeof(float);
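  // MR = 2: two input elements are processed against each shared int8 weight;
  // a single trailing element falls through to the mc & 1 path below.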
while (mc >= 2 * sizeof(float)) {
const void* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
float vacc1x0 = vacc0x0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
} while (--nnz != 0);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
vacc0x0 *= vscale0;
vacc1x0 *= vscale0;
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = unaligned_load_f32(w);
w = (const float*) w + 1;
float vacc1 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
} while (--nnz != 0);
}
float vscale = unaligned_load_f32(w);
w = (const float*) w + 1;
vacc0 *= vscale;
vacc1 *= vscale;
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
output[0] = vout0;
output[1] = vout1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
mc -= 2 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const void* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0x0 += vi0 * vw0;
} while (--nnz != 0);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
vacc0x0 *= vscale0;
float vout0x0 = math_min_f32(vacc0x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
output[0] = vout0x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = unaligned_load_f32(w);
w = (const float*) w + 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0 += vi0 * vw;
} while (--nnz != 0);
}
float vscale = unaligned_load_f32(w);
w = (const float*) w + 1;
vacc0 *= vscale;
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 5,652 | 32.64881 | 84 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-spmm/gen/f32-qc8w-spmm-4x1-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
void xnn_f32_qc8w_spmm_minmax_ukernel_4x1__scalar(
size_t mc,
size_t nc,
const float* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 4 * sizeof(float);
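  // MR = 4: four input elements per pass; leftovers are handled 2 and then 1
  // at a time in the mc remainder paths below.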
while (mc >= 4 * sizeof(float)) {
const void* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
float vacc1x0 = vacc0x0;
float vacc2x0 = vacc0x0;
float vacc3x0 = vacc0x0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc2x0 += vi2 * vw0;
vacc3x0 += vi3 * vw0;
} while (--nnz != 0);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
vacc0x0 *= vscale0;
vacc1x0 *= vscale0;
vacc2x0 *= vscale0;
vacc3x0 *= vscale0;
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout2x0 = math_min_f32(vacc2x0, vmax);
float vout3x0 = math_min_f32(vacc3x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout2x0 = math_max_f32(vout2x0, vmin);
vout3x0 = math_max_f32(vout3x0, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output[2] = vout2x0;
output[3] = vout3x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = unaligned_load_f32(w);
w = (const float*) w + 1;
float vacc1 = vacc0;
float vacc2 = vacc0;
float vacc3 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
} while (--nnz != 0);
}
float vscale = unaligned_load_f32(w);
w = (const float*) w + 1;
vacc0 *= vscale;
vacc1 *= vscale;
vacc2 *= vscale;
vacc3 *= vscale;
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
mc -= 4 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const void* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
float vacc1x0 = vacc0x0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
} while (--nnz != 0);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
vacc0x0 *= vscale0;
vacc1x0 *= vscale0;
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = unaligned_load_f32(w);
w = (const float*) w + 1;
float vacc1 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
} while (--nnz != 0);
}
float vscale = unaligned_load_f32(w);
w = (const float*) w + 1;
vacc0 *= vscale;
vacc1 *= vscale;
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
output[0] = vout0;
output[1] = vout1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const void* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0x0 += vi0 * vw0;
} while (--nnz != 0);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
vacc0x0 *= vscale0;
float vout0x0 = math_min_f32(vacc0x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
output[0] = vout0x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = unaligned_load_f32(w);
w = (const float*) w + 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0 += vi0 * vw;
} while (--nnz != 0);
}
float vscale = unaligned_load_f32(w);
w = (const float*) w + 1;
vacc0 *= vscale;
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 9,353 | 33.773234 | 84 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-spmm/gen/f32-qc8w-spmm-8x1-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
void xnn_f32_qc8w_spmm_minmax_ukernel_8x1__scalar(
size_t mc,
size_t nc,
const float* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
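  // MR = 8: eight input elements per pass share each int8 weight and scale;
  // mc remainders cascade through the 4-, 2-, and 1-element paths below.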
while (mc >= 8 * sizeof(float)) {
const void* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
float vacc1x0 = vacc0x0;
float vacc2x0 = vacc0x0;
float vacc3x0 = vacc0x0;
float vacc4x0 = vacc0x0;
float vacc5x0 = vacc0x0;
float vacc6x0 = vacc0x0;
float vacc7x0 = vacc0x0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
const float vi4 = input[4];
const float vi5 = input[5];
const float vi6 = input[6];
const float vi7 = input[7];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc2x0 += vi2 * vw0;
vacc3x0 += vi3 * vw0;
vacc4x0 += vi4 * vw0;
vacc5x0 += vi5 * vw0;
vacc6x0 += vi6 * vw0;
vacc7x0 += vi7 * vw0;
} while (--nnz != 0);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
vacc0x0 *= vscale0;
vacc1x0 *= vscale0;
vacc2x0 *= vscale0;
vacc3x0 *= vscale0;
vacc4x0 *= vscale0;
vacc5x0 *= vscale0;
vacc6x0 *= vscale0;
vacc7x0 *= vscale0;
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout2x0 = math_min_f32(vacc2x0, vmax);
float vout3x0 = math_min_f32(vacc3x0, vmax);
float vout4x0 = math_min_f32(vacc4x0, vmax);
float vout5x0 = math_min_f32(vacc5x0, vmax);
float vout6x0 = math_min_f32(vacc6x0, vmax);
float vout7x0 = math_min_f32(vacc7x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout2x0 = math_max_f32(vout2x0, vmin);
vout3x0 = math_max_f32(vout3x0, vmin);
vout4x0 = math_max_f32(vout4x0, vmin);
vout5x0 = math_max_f32(vout5x0, vmin);
vout6x0 = math_max_f32(vout6x0, vmin);
vout7x0 = math_max_f32(vout7x0, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output[2] = vout2x0;
output[3] = vout3x0;
output[4] = vout4x0;
output[5] = vout5x0;
output[6] = vout6x0;
output[7] = vout7x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = unaligned_load_f32(w);
w = (const float*) w + 1;
float vacc1 = vacc0;
float vacc2 = vacc0;
float vacc3 = vacc0;
float vacc4 = vacc0;
float vacc5 = vacc0;
float vacc6 = vacc0;
float vacc7 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
const float vi4 = input[4];
const float vi5 = input[5];
const float vi6 = input[6];
const float vi7 = input[7];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
vacc4 += vi4 * vw;
vacc5 += vi5 * vw;
vacc6 += vi6 * vw;
vacc7 += vi7 * vw;
} while (--nnz != 0);
}
float vscale = unaligned_load_f32(w);
w = (const float*) w + 1;
vacc0 *= vscale;
vacc1 *= vscale;
vacc2 *= vscale;
vacc3 *= vscale;
vacc4 *= vscale;
vacc5 *= vscale;
vacc6 *= vscale;
vacc7 *= vscale;
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
float vout4 = math_min_f32(vacc4, vmax);
float vout5 = math_min_f32(vacc5, vmax);
float vout6 = math_min_f32(vacc6, vmax);
float vout7 = math_min_f32(vacc7, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
vout4 = math_max_f32(vout4, vmin);
vout5 = math_max_f32(vout5, vmin);
vout6 = math_max_f32(vout6, vmin);
vout7 = math_max_f32(vout7, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output[4] = vout4;
output[5] = vout5;
output[6] = vout6;
output[7] = vout7;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const void* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
float vacc1x0 = vacc0x0;
float vacc2x0 = vacc0x0;
float vacc3x0 = vacc0x0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc2x0 += vi2 * vw0;
vacc3x0 += vi3 * vw0;
} while (--nnz != 0);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
vacc0x0 *= vscale0;
vacc1x0 *= vscale0;
vacc2x0 *= vscale0;
vacc3x0 *= vscale0;
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout2x0 = math_min_f32(vacc2x0, vmax);
float vout3x0 = math_min_f32(vacc3x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout2x0 = math_max_f32(vout2x0, vmin);
vout3x0 = math_max_f32(vout3x0, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output[2] = vout2x0;
output[3] = vout3x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = unaligned_load_f32(w);
w = (const float*) w + 1;
float vacc1 = vacc0;
float vacc2 = vacc0;
float vacc3 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
} while (--nnz != 0);
}
float vscale = unaligned_load_f32(w);
w = (const float*) w + 1;
vacc0 *= vscale;
vacc1 *= vscale;
vacc2 *= vscale;
vacc3 *= vscale;
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const void* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
float vacc1x0 = vacc0x0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
} while (--nnz != 0);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
vacc0x0 *= vscale0;
vacc1x0 *= vscale0;
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = unaligned_load_f32(w);
w = (const float*) w + 1;
float vacc1 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
} while (--nnz != 0);
}
float vscale = unaligned_load_f32(w);
w = (const float*) w + 1;
vacc0 *= vscale;
vacc1 *= vscale;
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
output[0] = vout0;
output[1] = vout1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const void* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 1) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0x0 += vi0 * vw0;
} while (--nnz != 0);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
w = (const float*) w + 1;
vacc0x0 *= vscale0;
float vout0x0 = math_min_f32(vacc0x0, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
output[0] = vout0x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = unaligned_load_f32(w);
w = (const float*) w + 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0 += vi0 * vw;
} while (--nnz != 0);
}
float vscale = unaligned_load_f32(w);
w = (const float*) w + 1;
vacc0 *= vscale;
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 15,140 | 34.376168 | 84 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qc8w-spmm/gen/f32-qc8w-spmm-8x2-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
void xnn_f32_qc8w_spmm_minmax_ukernel_8x2__scalar(
size_t mc,
size_t nc,
const float* input,
const void* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
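  // NR = 2: output channels are handled in pairs, each packed as 2 float
  // biases, nnz pairs of int8 weights, and 2 float scales; an odd trailing
  // channel uses the single-channel path below.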
while (mc >= 8 * sizeof(float)) {
const void* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = unaligned_indexed_load_f32(w, 0);
float vacc0x1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
float vacc1x0 = vacc0x0;
float vacc1x1 = vacc0x1;
float vacc2x0 = vacc0x0;
float vacc2x1 = vacc0x1;
float vacc3x0 = vacc0x0;
float vacc3x1 = vacc0x1;
float vacc4x0 = vacc0x0;
float vacc4x1 = vacc0x1;
float vacc5x0 = vacc0x0;
float vacc5x1 = vacc0x1;
float vacc6x0 = vacc0x0;
float vacc6x1 = vacc0x1;
float vacc7x0 = vacc0x0;
float vacc7x1 = vacc0x1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
const float vi4 = input[4];
const float vi5 = input[5];
const float vi6 = input[6];
const float vi7 = input[7];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = (float) ((const int8_t*) w)[0];
const float vw1 = (float) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc2x0 += vi2 * vw0;
vacc3x0 += vi3 * vw0;
vacc4x0 += vi4 * vw0;
vacc5x0 += vi5 * vw0;
vacc6x0 += vi6 * vw0;
vacc7x0 += vi7 * vw0;
vacc0x1 += vi0 * vw1;
vacc1x1 += vi1 * vw1;
vacc2x1 += vi2 * vw1;
vacc3x1 += vi3 * vw1;
vacc4x1 += vi4 * vw1;
vacc5x1 += vi5 * vw1;
vacc6x1 += vi6 * vw1;
vacc7x1 += vi7 * vw1;
} while (--nnz != 0);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
vacc0x0 *= vscale0;
vacc1x0 *= vscale0;
vacc2x0 *= vscale0;
vacc3x0 *= vscale0;
vacc4x0 *= vscale0;
vacc5x0 *= vscale0;
vacc6x0 *= vscale0;
vacc7x0 *= vscale0;
vacc0x1 *= vscale1;
vacc1x1 *= vscale1;
vacc2x1 *= vscale1;
vacc3x1 *= vscale1;
vacc4x1 *= vscale1;
vacc5x1 *= vscale1;
vacc6x1 *= vscale1;
vacc7x1 *= vscale1;
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout2x0 = math_min_f32(vacc2x0, vmax);
float vout3x0 = math_min_f32(vacc3x0, vmax);
float vout4x0 = math_min_f32(vacc4x0, vmax);
float vout5x0 = math_min_f32(vacc5x0, vmax);
float vout6x0 = math_min_f32(vacc6x0, vmax);
float vout7x0 = math_min_f32(vacc7x0, vmax);
float vout0x1 = math_min_f32(vacc0x1, vmax);
float vout1x1 = math_min_f32(vacc1x1, vmax);
float vout2x1 = math_min_f32(vacc2x1, vmax);
float vout3x1 = math_min_f32(vacc3x1, vmax);
float vout4x1 = math_min_f32(vacc4x1, vmax);
float vout5x1 = math_min_f32(vacc5x1, vmax);
float vout6x1 = math_min_f32(vacc6x1, vmax);
float vout7x1 = math_min_f32(vacc7x1, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout2x0 = math_max_f32(vout2x0, vmin);
vout3x0 = math_max_f32(vout3x0, vmin);
vout4x0 = math_max_f32(vout4x0, vmin);
vout5x0 = math_max_f32(vout5x0, vmin);
vout6x0 = math_max_f32(vout6x0, vmin);
vout7x0 = math_max_f32(vout7x0, vmin);
vout0x1 = math_max_f32(vout0x1, vmin);
vout1x1 = math_max_f32(vout1x1, vmin);
vout2x1 = math_max_f32(vout2x1, vmin);
vout3x1 = math_max_f32(vout3x1, vmin);
vout4x1 = math_max_f32(vout4x1, vmin);
vout5x1 = math_max_f32(vout5x1, vmin);
vout6x1 = math_max_f32(vout6x1, vmin);
vout7x1 = math_max_f32(vout7x1, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output[2] = vout2x0;
output[3] = vout3x0;
output[4] = vout4x0;
output[5] = vout5x0;
output[6] = vout6x0;
output[7] = vout7x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x1;
output[1] = vout1x1;
output[2] = vout2x1;
output[3] = vout3x1;
output[4] = vout4x1;
output[5] = vout5x1;
output[6] = vout6x1;
output[7] = vout7x1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 2;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = unaligned_load_f32(w);
w = (const float*) w + 1;
float vacc1 = vacc0;
float vacc2 = vacc0;
float vacc3 = vacc0;
float vacc4 = vacc0;
float vacc5 = vacc0;
float vacc6 = vacc0;
float vacc7 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
const float vi4 = input[4];
const float vi5 = input[5];
const float vi6 = input[6];
const float vi7 = input[7];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float vw = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
vacc4 += vi4 * vw;
vacc5 += vi5 * vw;
vacc6 += vi6 * vw;
vacc7 += vi7 * vw;
} while (--nnz != 0);
}
float vscale = unaligned_load_f32(w);
w = (const float*) w + 1;
vacc0 *= vscale;
vacc1 *= vscale;
vacc2 *= vscale;
vacc3 *= vscale;
vacc4 *= vscale;
vacc5 *= vscale;
vacc6 *= vscale;
vacc7 *= vscale;
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
float vout4 = math_min_f32(vacc4, vmax);
float vout5 = math_min_f32(vacc5, vmax);
float vout6 = math_min_f32(vacc6, vmax);
float vout7 = math_min_f32(vacc7, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
vout4 = math_max_f32(vout4, vmin);
vout5 = math_max_f32(vout5, vmin);
vout6 = math_max_f32(vout6, vmin);
vout7 = math_max_f32(vout7, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output[4] = vout4;
output[5] = vout5;
output[6] = vout6;
output[7] = vout7;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const void* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = unaligned_indexed_load_f32(w, 0);
float vacc0x1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
float vacc1x0 = vacc0x0;
float vacc2x0 = vacc0x0;
float vacc3x0 = vacc0x0;
float vacc1x1 = vacc0x1;
float vacc2x1 = vacc0x1;
float vacc3x1 = vacc0x1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = (float) ((const int8_t*) w)[0];
const float vw1 = (float) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc2x0 += vi2 * vw0;
vacc3x0 += vi3 * vw0;
vacc0x1 += vi0 * vw1;
vacc1x1 += vi1 * vw1;
vacc2x1 += vi2 * vw1;
vacc3x1 += vi3 * vw1;
} while (--nnz != 0);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
vacc0x0 *= vscale0;
vacc1x0 *= vscale0;
vacc2x0 *= vscale0;
vacc3x0 *= vscale0;
vacc0x1 *= vscale1;
vacc1x1 *= vscale1;
vacc2x1 *= vscale1;
vacc3x1 *= vscale1;
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout2x0 = math_min_f32(vacc2x0, vmax);
float vout3x0 = math_min_f32(vacc3x0, vmax);
float vout0x1 = math_min_f32(vacc0x1, vmax);
float vout1x1 = math_min_f32(vacc1x1, vmax);
float vout2x1 = math_min_f32(vacc2x1, vmax);
float vout3x1 = math_min_f32(vacc3x1, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout2x0 = math_max_f32(vout2x0, vmin);
vout3x0 = math_max_f32(vout3x0, vmin);
vout0x1 = math_max_f32(vout0x1, vmin);
vout1x1 = math_max_f32(vout1x1, vmin);
vout2x1 = math_max_f32(vout2x1, vmin);
vout3x1 = math_max_f32(vout3x1, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output[2] = vout2x0;
output[3] = vout3x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x1;
output[1] = vout1x1;
output[2] = vout2x1;
output[3] = vout3x1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 2;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = unaligned_load_f32(w);
w = (const float*) w + 1;
float vacc1 = vacc0;
float vacc2 = vacc0;
float vacc3 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
              const float vw = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
} while (--nnz != 0);
}
float vscale = unaligned_load_f32(w);
w = (const float*) w + 1;
vacc0 *= vscale;
vacc1 *= vscale;
vacc2 *= vscale;
vacc3 *= vscale;
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const void* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = unaligned_indexed_load_f32(w, 0);
float vacc0x1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
float vacc1x0 = vacc0x0;
float vacc1x1 = vacc0x1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = (float) ((const int8_t*) w)[0];
const float vw1 = (float) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc0x1 += vi0 * vw1;
vacc1x1 += vi1 * vw1;
} while (--nnz != 0);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
vacc0x0 *= vscale0;
vacc1x0 *= vscale0;
vacc0x1 *= vscale1;
vacc1x1 *= vscale1;
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout0x1 = math_min_f32(vacc0x1, vmax);
float vout1x1 = math_min_f32(vacc1x1, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout0x1 = math_max_f32(vout0x1, vmin);
vout1x1 = math_max_f32(vout1x1, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x1;
output[1] = vout1x1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 2;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = unaligned_load_f32(w);
w = (const float*) w + 1;
float vacc1 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
              const float vw = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
} while (--nnz != 0);
}
float vscale = unaligned_load_f32(w);
w = (const float*) w + 1;
vacc0 *= vscale;
vacc1 *= vscale;
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
output[0] = vout0;
output[1] = vout1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const void* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = unaligned_indexed_load_f32(w, 0);
float vacc0x1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = (float) ((const int8_t*) w)[0];
const float vw1 = (float) ((const int8_t*) w)[1];
w = (const int8_t*) w + 2;
vacc0x0 += vi0 * vw0;
vacc0x1 += vi0 * vw1;
} while (--nnz != 0);
}
const float vscale0 = unaligned_indexed_load_f32(w, 0);
const float vscale1 = unaligned_indexed_load_f32(w, 1);
w = (const float*) w + 2;
vacc0x0 *= vscale0;
vacc0x1 *= vscale1;
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout0x1 = math_min_f32(vacc0x1, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout0x1 = math_max_f32(vout0x1, vmin);
output[0] = vout0x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 2;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = unaligned_load_f32(w);
w = (const float*) w + 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const float vw = (float) ((const int8_t*) w)[0];
w = (const int8_t*) w + 1;
vacc0 += vi0 * vw;
} while (--nnz != 0);
}
float vscale = unaligned_load_f32(w);
w = (const float*) w + 1;
vacc0 *= vscale;
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 19,290 | 35.398113 | 84 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-avx-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__avx_x16(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx.output_min);
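  // Per iteration: scale, clamp from above by (output_max - zero_point), round with
  // CVTPS2DQ (round-to-nearest-even), pack to 16 bits and add the zero point with signed
  // saturation, pack to 8 bits, then clamp from below by output_min.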
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m256 vx01234567 = _mm256_loadu_ps(input);
__m256 vx89ABCDEF = _mm256_loadu_ps(input + 8);
input += 16;
vx01234567 = _mm256_mul_ps(vx01234567, vscale);
vx89ABCDEF = _mm256_mul_ps(vx89ABCDEF, vscale);
vx01234567 = _mm256_min_ps(vx01234567, voutput_max_less_zero_point);
vx89ABCDEF = _mm256_min_ps(vx89ABCDEF, voutput_max_less_zero_point);
const __m256i vacc01234567 = _mm256_cvtps_epi32(vx01234567);
const __m256i vacc89ABCDEF = _mm256_cvtps_epi32(vx89ABCDEF);
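    // AVX1 has no 256-bit integer instructions, so the packing below operates on the two
    // 128-bit halves extracted from each 256-bit conversion result.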
__m128i vy01234567 = _mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extractf128_si256(vacc01234567, 1));
__m128i vy89ABCDEF = _mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extractf128_si256(vacc89ABCDEF, 1));
vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);
__m128i vy0123456789ABCDEF = _mm_packs_epi16(vy01234567, vy89ABCDEF);
vy0123456789ABCDEF = _mm_max_epi8(vy0123456789ABCDEF, voutput_min);
_mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
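    // Build a partial-load mask from the shared mask_table so that only the remaining
    // batch elements are read; the unused lanes are discarded by the partial stores below.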
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 3,805 | 33.917431 | 122 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-avx-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__avx_x24(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx.output_min);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
__m256 vx01234567 = _mm256_loadu_ps(input);
__m256 vx89ABCDEF = _mm256_loadu_ps(input + 8);
__m256 vxGHIJKLMN = _mm256_loadu_ps(input + 16);
input += 24;
vx01234567 = _mm256_mul_ps(vx01234567, vscale);
vx89ABCDEF = _mm256_mul_ps(vx89ABCDEF, vscale);
vxGHIJKLMN = _mm256_mul_ps(vxGHIJKLMN, vscale);
vx01234567 = _mm256_min_ps(vx01234567, voutput_max_less_zero_point);
vx89ABCDEF = _mm256_min_ps(vx89ABCDEF, voutput_max_less_zero_point);
vxGHIJKLMN = _mm256_min_ps(vxGHIJKLMN, voutput_max_less_zero_point);
const __m256i vacc01234567 = _mm256_cvtps_epi32(vx01234567);
const __m256i vacc89ABCDEF = _mm256_cvtps_epi32(vx89ABCDEF);
const __m256i vaccGHIJKLMN = _mm256_cvtps_epi32(vxGHIJKLMN);
__m128i vy01234567 = _mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extractf128_si256(vacc01234567, 1));
__m128i vy89ABCDEF = _mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extractf128_si256(vacc89ABCDEF, 1));
__m128i vyGHIJKLMN = _mm_packs_epi32(_mm256_castsi256_si128(vaccGHIJKLMN), _mm256_extractf128_si256(vaccGHIJKLMN, 1));
vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);
vyGHIJKLMN = _mm_adds_epi16(vyGHIJKLMN, voutput_zero_point);
__m128i vy0123456789ABCDEF = _mm_packs_epi16(vy01234567, vy89ABCDEF);
vyGHIJKLMN = _mm_packs_epi16(vyGHIJKLMN, vyGHIJKLMN);
vy0123456789ABCDEF = _mm_max_epi8(vy0123456789ABCDEF, voutput_min);
vyGHIJKLMN = _mm_max_epi8(vyGHIJKLMN, voutput_min);
_mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), vyGHIJKLMN);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 4,410 | 36.381356 | 122 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-avx-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__avx_x32(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx.output_min);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m256 vx01234567 = _mm256_loadu_ps(input);
__m256 vx89ABCDEF = _mm256_loadu_ps(input + 8);
__m256 vxGHIJKLMN = _mm256_loadu_ps(input + 16);
__m256 vxOPQRSTUV = _mm256_loadu_ps(input + 24);
input += 32;
vx01234567 = _mm256_mul_ps(vx01234567, vscale);
vx89ABCDEF = _mm256_mul_ps(vx89ABCDEF, vscale);
vxGHIJKLMN = _mm256_mul_ps(vxGHIJKLMN, vscale);
vxOPQRSTUV = _mm256_mul_ps(vxOPQRSTUV, vscale);
vx01234567 = _mm256_min_ps(vx01234567, voutput_max_less_zero_point);
vx89ABCDEF = _mm256_min_ps(vx89ABCDEF, voutput_max_less_zero_point);
vxGHIJKLMN = _mm256_min_ps(vxGHIJKLMN, voutput_max_less_zero_point);
vxOPQRSTUV = _mm256_min_ps(vxOPQRSTUV, voutput_max_less_zero_point);
const __m256i vacc01234567 = _mm256_cvtps_epi32(vx01234567);
const __m256i vacc89ABCDEF = _mm256_cvtps_epi32(vx89ABCDEF);
const __m256i vaccGHIJKLMN = _mm256_cvtps_epi32(vxGHIJKLMN);
const __m256i vaccOPQRSTUV = _mm256_cvtps_epi32(vxOPQRSTUV);
__m128i vy01234567 = _mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extractf128_si256(vacc01234567, 1));
__m128i vy89ABCDEF = _mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extractf128_si256(vacc89ABCDEF, 1));
__m128i vyGHIJKLMN = _mm_packs_epi32(_mm256_castsi256_si128(vaccGHIJKLMN), _mm256_extractf128_si256(vaccGHIJKLMN, 1));
__m128i vyOPQRSTUV = _mm_packs_epi32(_mm256_castsi256_si128(vaccOPQRSTUV), _mm256_extractf128_si256(vaccOPQRSTUV, 1));
vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);
vyGHIJKLMN = _mm_adds_epi16(vyGHIJKLMN, voutput_zero_point);
vyOPQRSTUV = _mm_adds_epi16(vyOPQRSTUV, voutput_zero_point);
__m128i vy0123456789ABCDEF = _mm_packs_epi16(vy01234567, vy89ABCDEF);
__m128i vyGHIJKLMNOPQRSTUV = _mm_packs_epi16(vyGHIJKLMN, vyOPQRSTUV);
vy0123456789ABCDEF = _mm_max_epi8(vy0123456789ABCDEF, voutput_min);
vyGHIJKLMNOPQRSTUV = _mm_max_epi8(vyGHIJKLMNOPQRSTUV, voutput_min);
_mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), vyGHIJKLMNOPQRSTUV);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 4,881 | 38.370968 | 122 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-avx-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__avx_x8(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx.output_min);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 2,625 | 31.02439 | 112 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-avx2-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__avx2_x16(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx2.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx2.output_max_less_zero_point);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m256 vx01 = _mm256_loadu_ps(input);
__m256 vx23 = _mm256_loadu_ps(input + 8);
input += 16;
vx01 = _mm256_mul_ps(vx01, vscale);
vx23 = _mm256_mul_ps(vx23, vscale);
vx01 = _mm256_min_ps(vx01, voutput_max_less_zero_point);
vx23 = _mm256_min_ps(vx23, voutput_max_less_zero_point);
const __m256i vacc01 = _mm256_cvtps_epi32(vx01);
const __m256i vacc23 = _mm256_cvtps_epi32(vx23);
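    // _mm256_packs_epi32 packs within each 128-bit lane, so the 16-bit groups come out in
    // 0,2,1,3 order; the _mm_shuffle_epi32 after the final pack restores linear order.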
__m256i vacc0213 = _mm256_packs_epi32(vacc01, vacc23);
vacc0213 = _mm256_adds_epi16(vacc0213, voutput_zero_point);
const __m128i vy0213 = _mm_packs_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epi8(vy0123, voutput_min);
_mm_storeu_si128((__m128i*) output, vy0123);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 3,607 | 32.100917 | 116 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-avx2-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__avx2_x32(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx2.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx2.output_max_less_zero_point);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
const __m256i vshuffle_mask = _mm256_load_si256((const __m256i*) params->avx2.shuffle_mask);
const __m256i voutput_min = _mm256_load_si256((const __m256i*) params->avx2.output_min);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m256 vx01 = _mm256_loadu_ps(input);
__m256 vx23 = _mm256_loadu_ps(input + 8);
__m256 vx45 = _mm256_loadu_ps(input + 16);
__m256 vx67 = _mm256_loadu_ps(input + 24);
input += 32;
vx01 = _mm256_mul_ps(vx01, vscale);
vx23 = _mm256_mul_ps(vx23, vscale);
vx45 = _mm256_mul_ps(vx45, vscale);
vx67 = _mm256_mul_ps(vx67, vscale);
vx01 = _mm256_min_ps(vx01, voutput_max_less_zero_point);
vx23 = _mm256_min_ps(vx23, voutput_max_less_zero_point);
vx45 = _mm256_min_ps(vx45, voutput_max_less_zero_point);
vx67 = _mm256_min_ps(vx67, voutput_max_less_zero_point);
const __m256i vacc01 = _mm256_cvtps_epi32(vx01);
const __m256i vacc23 = _mm256_cvtps_epi32(vx23);
const __m256i vacc45 = _mm256_cvtps_epi32(vx45);
const __m256i vacc67 = _mm256_cvtps_epi32(vx67);
__m256i vacc0213 = _mm256_packs_epi32(vacc01, vacc23);
__m256i vacc4657 = _mm256_packs_epi32(vacc45, vacc67);
vacc0213 = _mm256_adds_epi16(vacc0213, voutput_zero_point);
vacc4657 = _mm256_adds_epi16(vacc4657, voutput_zero_point);
const __m256i vy02461357 = _mm256_packs_epi16(vacc0213, vacc4657);
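    // The per-lane packs leave the bytes in 0,2,4,6,1,3,5,7 group order; the cross-lane
    // permute with vshuffle_mask below restores linear order.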
__m256i vy01234567 = _mm256_permutevar8x32_epi32(vy02461357, vshuffle_mask);
vy01234567 = _mm256_max_epi8(vy01234567, voutput_min);
_mm256_storeu_si256((__m256i*) output, vy01234567);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, _mm256_castsi256_si128(voutput_min));
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, _mm256_castsi256_si128(voutput_min));
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 4,258 | 34.491667 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-avx2-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__avx2_x48(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx2.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx2.output_max_less_zero_point);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
const __m256i vshuffle_mask = _mm256_load_si256((const __m256i*) params->avx2.shuffle_mask);
const __m256i voutput_min = _mm256_load_si256((const __m256i*) params->avx2.output_min);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
__m256 vx01 = _mm256_loadu_ps(input);
__m256 vx23 = _mm256_loadu_ps(input + 8);
__m256 vx45 = _mm256_loadu_ps(input + 16);
__m256 vx67 = _mm256_loadu_ps(input + 24);
__m256 vx89 = _mm256_loadu_ps(input + 32);
__m256 vxAB = _mm256_loadu_ps(input + 40);
input += 48;
vx01 = _mm256_mul_ps(vx01, vscale);
vx23 = _mm256_mul_ps(vx23, vscale);
vx45 = _mm256_mul_ps(vx45, vscale);
vx67 = _mm256_mul_ps(vx67, vscale);
vx89 = _mm256_mul_ps(vx89, vscale);
vxAB = _mm256_mul_ps(vxAB, vscale);
vx01 = _mm256_min_ps(vx01, voutput_max_less_zero_point);
vx23 = _mm256_min_ps(vx23, voutput_max_less_zero_point);
vx45 = _mm256_min_ps(vx45, voutput_max_less_zero_point);
vx67 = _mm256_min_ps(vx67, voutput_max_less_zero_point);
vx89 = _mm256_min_ps(vx89, voutput_max_less_zero_point);
vxAB = _mm256_min_ps(vxAB, voutput_max_less_zero_point);
const __m256i vacc01 = _mm256_cvtps_epi32(vx01);
const __m256i vacc23 = _mm256_cvtps_epi32(vx23);
const __m256i vacc45 = _mm256_cvtps_epi32(vx45);
const __m256i vacc67 = _mm256_cvtps_epi32(vx67);
const __m256i vacc89 = _mm256_cvtps_epi32(vx89);
const __m256i vaccAB = _mm256_cvtps_epi32(vxAB);
__m256i vacc0213 = _mm256_packs_epi32(vacc01, vacc23);
__m256i vacc4657 = _mm256_packs_epi32(vacc45, vacc67);
__m256i vacc8A9B = _mm256_packs_epi32(vacc89, vaccAB);
vacc0213 = _mm256_adds_epi16(vacc0213, voutput_zero_point);
vacc4657 = _mm256_adds_epi16(vacc4657, voutput_zero_point);
vacc8A9B = _mm256_adds_epi16(vacc8A9B, voutput_zero_point);
const __m256i vy02461357 = _mm256_packs_epi16(vacc0213, vacc4657);
const __m128i vy8A9B = _mm_packs_epi16(_mm256_castsi256_si128(vacc8A9B), _mm256_extracti128_si256(vacc8A9B, 1));
__m256i vy01234567 = _mm256_permutevar8x32_epi32(vy02461357, vshuffle_mask);
__m128i vy89AB = _mm_shuffle_epi32(vy8A9B, _MM_SHUFFLE(3, 1, 2, 0));
vy01234567 = _mm256_max_epi8(vy01234567, voutput_min);
vy89AB = _mm_max_epi8(vy89AB, _mm256_castsi256_si128(voutput_min));
_mm256_storeu_si256((__m256i*) output, vy01234567);
_mm_storeu_si128((__m128i*) (output + 32), vy89AB);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, _mm256_castsi256_si128(voutput_min));
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, _mm256_castsi256_si128(voutput_min));
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 5,101 | 37.074627 | 116 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-avx2-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__avx2_x64(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx2.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx2.output_max_less_zero_point);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
const __m256i vshuffle_mask = _mm256_load_si256((const __m256i*) params->avx2.shuffle_mask);
const __m256i voutput_min = _mm256_load_si256((const __m256i*) params->avx2.output_min);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
__m256 vx01 = _mm256_loadu_ps(input);
__m256 vx23 = _mm256_loadu_ps(input + 8);
__m256 vx45 = _mm256_loadu_ps(input + 16);
__m256 vx67 = _mm256_loadu_ps(input + 24);
__m256 vx89 = _mm256_loadu_ps(input + 32);
__m256 vxAB = _mm256_loadu_ps(input + 40);
__m256 vxCD = _mm256_loadu_ps(input + 48);
__m256 vxEF = _mm256_loadu_ps(input + 56);
input += 64;
vx01 = _mm256_mul_ps(vx01, vscale);
vx23 = _mm256_mul_ps(vx23, vscale);
vx45 = _mm256_mul_ps(vx45, vscale);
vx67 = _mm256_mul_ps(vx67, vscale);
vx89 = _mm256_mul_ps(vx89, vscale);
vxAB = _mm256_mul_ps(vxAB, vscale);
vxCD = _mm256_mul_ps(vxCD, vscale);
vxEF = _mm256_mul_ps(vxEF, vscale);
vx01 = _mm256_min_ps(vx01, voutput_max_less_zero_point);
vx23 = _mm256_min_ps(vx23, voutput_max_less_zero_point);
vx45 = _mm256_min_ps(vx45, voutput_max_less_zero_point);
vx67 = _mm256_min_ps(vx67, voutput_max_less_zero_point);
vx89 = _mm256_min_ps(vx89, voutput_max_less_zero_point);
vxAB = _mm256_min_ps(vxAB, voutput_max_less_zero_point);
vxCD = _mm256_min_ps(vxCD, voutput_max_less_zero_point);
vxEF = _mm256_min_ps(vxEF, voutput_max_less_zero_point);
const __m256i vacc01 = _mm256_cvtps_epi32(vx01);
const __m256i vacc23 = _mm256_cvtps_epi32(vx23);
const __m256i vacc45 = _mm256_cvtps_epi32(vx45);
const __m256i vacc67 = _mm256_cvtps_epi32(vx67);
const __m256i vacc89 = _mm256_cvtps_epi32(vx89);
const __m256i vaccAB = _mm256_cvtps_epi32(vxAB);
const __m256i vaccCD = _mm256_cvtps_epi32(vxCD);
const __m256i vaccEF = _mm256_cvtps_epi32(vxEF);
__m256i vacc0213 = _mm256_packs_epi32(vacc01, vacc23);
__m256i vacc4657 = _mm256_packs_epi32(vacc45, vacc67);
__m256i vacc8A9B = _mm256_packs_epi32(vacc89, vaccAB);
__m256i vaccCEDF = _mm256_packs_epi32(vaccCD, vaccEF);
vacc0213 = _mm256_adds_epi16(vacc0213, voutput_zero_point);
vacc4657 = _mm256_adds_epi16(vacc4657, voutput_zero_point);
vacc8A9B = _mm256_adds_epi16(vacc8A9B, voutput_zero_point);
vaccCEDF = _mm256_adds_epi16(vaccCEDF, voutput_zero_point);
const __m256i vy02461357 = _mm256_packs_epi16(vacc0213, vacc4657);
const __m256i vy8ACE9BDF = _mm256_packs_epi16(vacc8A9B, vaccCEDF);
__m256i vy01234567 = _mm256_permutevar8x32_epi32(vy02461357, vshuffle_mask);
__m256i vy89ABCDEF = _mm256_permutevar8x32_epi32(vy8ACE9BDF, vshuffle_mask);
vy01234567 = _mm256_max_epi8(vy01234567, voutput_min);
vy89ABCDEF = _mm256_max_epi8(vy89ABCDEF, voutput_min);
_mm256_storeu_si256((__m256i*) output, vy01234567);
_mm256_storeu_si256((__m256i*) (output + 32), vy89ABCDEF);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, _mm256_castsi256_si128(voutput_min));
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, _mm256_castsi256_si128(voutput_min));
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 5,582 | 37.770833 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-avx512skx-x128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__avx512skx_x128(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vscale = _mm512_load_ps(params->avx512.scale);
const __m512 voutput_max_less_zero_point = _mm512_load_ps(params->avx512.output_max_less_zero_point);
const __m512i voutput_zero_point = _mm512_load_si512(params->avx512.output_zero_point);
const __m512i vshuffle512_mask = _mm512_load_si512(params->avx512.shuffle512_mask);
const __m512i voutput_min = _mm512_load_si512(params->avx512.output_min);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
__m512 vx4567 = _mm512_loadu_ps(input + 16);
__m512 vx89AB = _mm512_loadu_ps(input + 32);
__m512 vxCDEF = _mm512_loadu_ps(input + 48);
__m512 vxGHIJ = _mm512_loadu_ps(input + 64);
__m512 vxKLMN = _mm512_loadu_ps(input + 80);
__m512 vxOPQR = _mm512_loadu_ps(input + 96);
__m512 vxSTUV = _mm512_loadu_ps(input + 112);
input += 128;
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx4567 = _mm512_mul_ps(vx4567, vscale);
vx89AB = _mm512_mul_ps(vx89AB, vscale);
vxCDEF = _mm512_mul_ps(vxCDEF, vscale);
vxGHIJ = _mm512_mul_ps(vxGHIJ, vscale);
vxKLMN = _mm512_mul_ps(vxKLMN, vscale);
vxOPQR = _mm512_mul_ps(vxOPQR, vscale);
vxSTUV = _mm512_mul_ps(vxSTUV, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
vx4567 = _mm512_min_ps(vx4567, voutput_max_less_zero_point);
vx89AB = _mm512_min_ps(vx89AB, voutput_max_less_zero_point);
vxCDEF = _mm512_min_ps(vxCDEF, voutput_max_less_zero_point);
vxGHIJ = _mm512_min_ps(vxGHIJ, voutput_max_less_zero_point);
vxKLMN = _mm512_min_ps(vxKLMN, voutput_max_less_zero_point);
vxOPQR = _mm512_min_ps(vxOPQR, voutput_max_less_zero_point);
vxSTUV = _mm512_min_ps(vxSTUV, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
const __m512i vacc4567 = _mm512_cvtps_epi32(vx4567);
const __m512i vacc89AB = _mm512_cvtps_epi32(vx89AB);
const __m512i vaccCDEF = _mm512_cvtps_epi32(vxCDEF);
const __m512i vaccGHIJ = _mm512_cvtps_epi32(vxGHIJ);
const __m512i vaccKLMN = _mm512_cvtps_epi32(vxKLMN);
const __m512i vaccOPQR = _mm512_cvtps_epi32(vxOPQR);
const __m512i vaccSTUV = _mm512_cvtps_epi32(vxSTUV);
__m512i vacc04152637 = _mm512_packs_epi32(vacc0123, vacc4567);
__m512i vacc8C9DAEBF = _mm512_packs_epi32(vacc89AB, vaccCDEF);
__m512i vaccGKHLIMJN = _mm512_packs_epi32(vaccGHIJ, vaccKLMN);
__m512i vaccOSPTQURV = _mm512_packs_epi32(vaccOPQR, vaccSTUV);
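    // _mm512_packs_epi32/_mm512_packs_epi16 interleave results across 128-bit lanes; the
    // _mm512_permutexvar_epi32 with vshuffle512_mask below undoes that interleaving.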
vacc04152637 = _mm512_adds_epi16(vacc04152637, voutput_zero_point);
vacc8C9DAEBF = _mm512_adds_epi16(vacc8C9DAEBF, voutput_zero_point);
vaccGKHLIMJN = _mm512_adds_epi16(vaccGKHLIMJN, voutput_zero_point);
vaccOSPTQURV = _mm512_adds_epi16(vaccOSPTQURV, voutput_zero_point);
__m512i vy048C159D26AE37BF = _mm512_packs_epi16(vacc04152637, vacc8C9DAEBF);
__m512i vyGKOSHLPTIMQUJNRV = _mm512_packs_epi16(vaccGKHLIMJN, vaccOSPTQURV);
vy048C159D26AE37BF = _mm512_max_epi8(vy048C159D26AE37BF, voutput_min);
vyGKOSHLPTIMQUJNRV = _mm512_max_epi8(vyGKOSHLPTIMQUJNRV, voutput_min);
const __m512i vy0123456789ABCDEF = _mm512_permutexvar_epi32(vshuffle512_mask, vy048C159D26AE37BF);
const __m512i vyGHIJKLMNOPQRSTUV = _mm512_permutexvar_epi32(vshuffle512_mask, vyGKOSHLPTIMQUJNRV);
_mm512_storeu_si512(output, vy0123456789ABCDEF);
_mm512_storeu_si512(output + 64, vyGHIJKLMNOPQRSTUV);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
input += 16;
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packs_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epi8(vy0123, _mm512_castsi512_si128(voutput_min));
_mm_storeu_si128((__m128i*) output, vy0123);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx0123 = _mm512_maskz_loadu_ps(vmask, input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packs_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epi8(vy0123, _mm512_castsi512_si128(voutput_min));
_mm_mask_storeu_epi8(output, vmask, vy0123);
}
}
| 6,064 | 43.595588 | 116 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-avx512skx-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__avx512skx_x32(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vscale = _mm512_load_ps(params->avx512.scale);
const __m512 voutput_max_less_zero_point = _mm512_load_ps(params->avx512.output_max_less_zero_point);
const __m512i voutput_zero_point = _mm512_load_si512(params->avx512.output_zero_point);
const __m256i vshuffle256_mask = _mm256_load_si256((const __m256i*) params->avx512.shuffle256_mask);
const __m256i voutput_min = _mm256_load_si256((const __m256i*) params->avx512.output_min);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
__m512 vx4567 = _mm512_loadu_ps(input + 16);
input += 32;
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx4567 = _mm512_mul_ps(vx4567, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
vx4567 = _mm512_min_ps(vx4567, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
const __m512i vacc4567 = _mm512_cvtps_epi32(vx4567);
__m512i vacc04152637 = _mm512_packs_epi32(vacc0123, vacc4567);
vacc04152637 = _mm512_adds_epi16(vacc04152637, voutput_zero_point);
__m256i vy04261537 = _mm256_packs_epi16(_mm512_castsi512_si256(vacc04152637), _mm512_extracti32x8_epi32(vacc04152637, 1));
vy04261537 = _mm256_max_epi8(vy04261537, voutput_min);
const __m256i vy01234567 = _mm256_permutevar8x32_epi32(vy04261537, vshuffle256_mask);
_mm256_storeu_si256((__m256i*) output, vy01234567);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
input += 16;
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packs_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epi8(vy0123, _mm256_castsi256_si128(voutput_min));
_mm_storeu_si128((__m128i*) output, vy0123);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx0123 = _mm512_maskz_loadu_ps(vmask, input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packs_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epi8(vy0123, _mm256_castsi256_si128(voutput_min));
_mm_mask_storeu_epi8(output, vmask, vy0123);
}
}
| 4,088 | 39.088235 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-avx512skx-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__avx512skx_x64(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vscale = _mm512_load_ps(params->avx512.scale);
const __m512 voutput_max_less_zero_point = _mm512_load_ps(params->avx512.output_max_less_zero_point);
const __m512i voutput_zero_point = _mm512_load_si512(params->avx512.output_zero_point);
const __m512i vshuffle512_mask = _mm512_load_si512(params->avx512.shuffle512_mask);
const __m512i voutput_min = _mm512_load_si512(params->avx512.output_min);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
__m512 vx4567 = _mm512_loadu_ps(input + 16);
__m512 vx89AB = _mm512_loadu_ps(input + 32);
__m512 vxCDEF = _mm512_loadu_ps(input + 48);
input += 64;
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx4567 = _mm512_mul_ps(vx4567, vscale);
vx89AB = _mm512_mul_ps(vx89AB, vscale);
vxCDEF = _mm512_mul_ps(vxCDEF, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
vx4567 = _mm512_min_ps(vx4567, voutput_max_less_zero_point);
vx89AB = _mm512_min_ps(vx89AB, voutput_max_less_zero_point);
vxCDEF = _mm512_min_ps(vxCDEF, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
const __m512i vacc4567 = _mm512_cvtps_epi32(vx4567);
const __m512i vacc89AB = _mm512_cvtps_epi32(vx89AB);
const __m512i vaccCDEF = _mm512_cvtps_epi32(vxCDEF);
__m512i vacc04152637 = _mm512_packs_epi32(vacc0123, vacc4567);
__m512i vacc8C9DAEBF = _mm512_packs_epi32(vacc89AB, vaccCDEF);
vacc04152637 = _mm512_adds_epi16(vacc04152637, voutput_zero_point);
vacc8C9DAEBF = _mm512_adds_epi16(vacc8C9DAEBF, voutput_zero_point);
__m512i vy048C159D26AE37BF = _mm512_packs_epi16(vacc04152637, vacc8C9DAEBF);
vy048C159D26AE37BF = _mm512_max_epi8(vy048C159D26AE37BF, voutput_min);
const __m512i vy0123456789ABCDEF = _mm512_permutexvar_epi32(vshuffle512_mask, vy048C159D26AE37BF);
_mm512_storeu_si512(output, vy0123456789ABCDEF);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
input += 16;
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packs_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epi8(vy0123, _mm512_castsi512_si128(voutput_min));
_mm_storeu_si128((__m128i*) output, vy0123);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx0123 = _mm512_maskz_loadu_ps(vmask, input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packs_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epi8(vy0123, _mm512_castsi512_si128(voutput_min));
_mm_mask_storeu_epi8(output, vmask, vy0123);
}
}
| 4,603 | 40.107143 | 116 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-avx512skx-x96.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__avx512skx_x96(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vscale = _mm512_load_ps(params->avx512.scale);
const __m512 voutput_max_less_zero_point = _mm512_load_ps(params->avx512.output_max_less_zero_point);
const __m512i voutput_zero_point = _mm512_load_si512(params->avx512.output_zero_point);
const __m512i vshuffle512_mask = _mm512_load_si512(params->avx512.shuffle512_mask);
const __m256i vshuffle256_mask = _mm256_load_si256((const __m256i*) params->avx512.shuffle256_mask);
const __m512i voutput_min = _mm512_load_si512(params->avx512.output_min);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
__m512 vx4567 = _mm512_loadu_ps(input + 16);
__m512 vx89AB = _mm512_loadu_ps(input + 32);
__m512 vxCDEF = _mm512_loadu_ps(input + 48);
__m512 vxGHIJ = _mm512_loadu_ps(input + 64);
__m512 vxKLMN = _mm512_loadu_ps(input + 80);
input += 96;
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx4567 = _mm512_mul_ps(vx4567, vscale);
vx89AB = _mm512_mul_ps(vx89AB, vscale);
vxCDEF = _mm512_mul_ps(vxCDEF, vscale);
vxGHIJ = _mm512_mul_ps(vxGHIJ, vscale);
vxKLMN = _mm512_mul_ps(vxKLMN, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
vx4567 = _mm512_min_ps(vx4567, voutput_max_less_zero_point);
vx89AB = _mm512_min_ps(vx89AB, voutput_max_less_zero_point);
vxCDEF = _mm512_min_ps(vxCDEF, voutput_max_less_zero_point);
vxGHIJ = _mm512_min_ps(vxGHIJ, voutput_max_less_zero_point);
vxKLMN = _mm512_min_ps(vxKLMN, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
const __m512i vacc4567 = _mm512_cvtps_epi32(vx4567);
const __m512i vacc89AB = _mm512_cvtps_epi32(vx89AB);
const __m512i vaccCDEF = _mm512_cvtps_epi32(vxCDEF);
const __m512i vaccGHIJ = _mm512_cvtps_epi32(vxGHIJ);
const __m512i vaccKLMN = _mm512_cvtps_epi32(vxKLMN);
__m512i vacc04152637 = _mm512_packs_epi32(vacc0123, vacc4567);
__m512i vacc8C9DAEBF = _mm512_packs_epi32(vacc89AB, vaccCDEF);
__m512i vaccGKHLIMJN = _mm512_packs_epi32(vaccGHIJ, vaccKLMN);
vacc04152637 = _mm512_adds_epi16(vacc04152637, voutput_zero_point);
vacc8C9DAEBF = _mm512_adds_epi16(vacc8C9DAEBF, voutput_zero_point);
vaccGKHLIMJN = _mm512_adds_epi16(vaccGKHLIMJN, voutput_zero_point);
__m512i vy048C159D26AE37BF = _mm512_packs_epi16(vacc04152637, vacc8C9DAEBF);
__m256i vyGKIMHLJN = _mm256_packs_epi16(_mm512_castsi512_si256(vaccGKHLIMJN), _mm512_extracti32x8_epi32(vaccGKHLIMJN, 1));
vy048C159D26AE37BF = _mm512_max_epi8(vy048C159D26AE37BF, voutput_min);
vyGKIMHLJN = _mm256_max_epi8(vyGKIMHLJN, _mm512_castsi512_si256(voutput_min));
const __m512i vy0123456789ABCDEF = _mm512_permutexvar_epi32(vshuffle512_mask, vy048C159D26AE37BF);
const __m256i vyGHIJKLMN = _mm256_permutevar8x32_epi32(vyGKIMHLJN, vshuffle256_mask);
_mm512_storeu_si512(output, vy0123456789ABCDEF);
_mm256_storeu_si256((__m256i*) (output + 64), vyGHIJKLMN);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
input += 16;
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packs_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epi8(vy0123, _mm512_castsi512_si128(voutput_min));
_mm_storeu_si128((__m128i*) output, vy0123);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx0123 = _mm512_maskz_loadu_ps(vmask, input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packs_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epi8(vy0123, _mm512_castsi512_si128(voutput_min));
_mm_mask_storeu_epi8(output, vmask, vy0123);
}
}
| 5,638 | 43.401575 | 126 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-neon-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__neon_x16(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon.magic_bias);
  const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon.magic_bias_less_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
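  // Rounding uses the "magic bias" trick: after scaling, adding the magic bias places the
  // rounded integer in the low mantissa bits of the float, and a saturating int32 subtraction
  // of magic_bias_less_zero_point recovers the quantized value with the zero point applied.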
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
vx0123 = vmulq_f32(vx0123, vscale);
vx4567 = vmulq_f32(vx4567, vscale);
vx89AB = vmulq_f32(vx89AB, vscale);
vxCDEF = vmulq_f32(vxCDEF, vscale);
vx0123 = vaddq_f32(vx0123, vmagic_bias);
vx4567 = vaddq_f32(vx4567, vmagic_bias);
vx89AB = vaddq_f32(vx89AB, vmagic_bias);
vxCDEF = vaddq_f32(vxCDEF, vmagic_bias);
const int32x4_t vacc0123 = vqsubq_s32(vreinterpretq_s32_f32(vx0123), vmagic_bias_less_zero_point);
const int32x4_t vacc4567 = vqsubq_s32(vreinterpretq_s32_f32(vx4567), vmagic_bias_less_zero_point);
const int32x4_t vacc89AB = vqsubq_s32(vreinterpretq_s32_f32(vx89AB), vmagic_bias_less_zero_point);
const int32x4_t vaccCDEF = vqsubq_s32(vreinterpretq_s32_f32(vxCDEF), vmagic_bias_less_zero_point);
const int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
const int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int8x16_t vy0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
vy0123456789ABCDEF = vmaxq_s8(vy0123456789ABCDEF, voutput_min);
vy0123456789ABCDEF = vminq_s8(vy0123456789ABCDEF, voutput_max);
vst1q_s8(output, vy0123456789ABCDEF); output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
float32x4_t vx_lo = vld1q_f32(input); input += 4;
float32x4_t vx_hi = vld1q_f32(input); input += 4;
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
vx_lo = vaddq_f32(vx_lo, vmagic_bias);
vx_hi = vaddq_f32(vx_hi, vmagic_bias);
const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);
const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, vget_low_s8(voutput_min));
vy = vmin_s8(vy, vget_low_s8(voutput_max));
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
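    // The tail loads whole 4-element vectors, which may read past the last valid input
    // (allowed, see XNN_OOB_READS); lanes beyond the batch are dropped by the partial
    // stores below.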
float32x4_t vx_lo = vld1q_f32(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
float32x4_t vx_hi = vld1q_f32(x_hi);
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
vx_lo = vaddq_f32(vx_lo, vmagic_bias);
vx_hi = vaddq_f32(vx_hi, vmagic_bias);
const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);
const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, vget_low_s8(voutput_min));
vy = vmin_s8(vy, vget_low_s8(voutput_max));
if (batch & (4 * sizeof(float))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(float))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 4,711 | 37.622951 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-neon-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__neon_x24(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vscale = vld1q_dup_f32(¶ms->neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon.magic_bias);
const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(¶ms->neon.magic_bias_less_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->neon.output_max);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
float32x4_t vxKLMN = vld1q_f32(input); input += 4;
vx0123 = vmulq_f32(vx0123, vscale);
vx4567 = vmulq_f32(vx4567, vscale);
vx89AB = vmulq_f32(vx89AB, vscale);
vxCDEF = vmulq_f32(vxCDEF, vscale);
vxGHIJ = vmulq_f32(vxGHIJ, vscale);
vxKLMN = vmulq_f32(vxKLMN, vscale);
vx0123 = vaddq_f32(vx0123, vmagic_bias);
vx4567 = vaddq_f32(vx4567, vmagic_bias);
vx89AB = vaddq_f32(vx89AB, vmagic_bias);
vxCDEF = vaddq_f32(vxCDEF, vmagic_bias);
vxGHIJ = vaddq_f32(vxGHIJ, vmagic_bias);
vxKLMN = vaddq_f32(vxKLMN, vmagic_bias);
const int32x4_t vacc0123 = vqsubq_s32(vreinterpretq_s32_f32(vx0123), vmagic_bias_less_zero_point);
const int32x4_t vacc4567 = vqsubq_s32(vreinterpretq_s32_f32(vx4567), vmagic_bias_less_zero_point);
const int32x4_t vacc89AB = vqsubq_s32(vreinterpretq_s32_f32(vx89AB), vmagic_bias_less_zero_point);
const int32x4_t vaccCDEF = vqsubq_s32(vreinterpretq_s32_f32(vxCDEF), vmagic_bias_less_zero_point);
const int32x4_t vaccGHIJ = vqsubq_s32(vreinterpretq_s32_f32(vxGHIJ), vmagic_bias_less_zero_point);
const int32x4_t vaccKLMN = vqsubq_s32(vreinterpretq_s32_f32(vxKLMN), vmagic_bias_less_zero_point);
const int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
const int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
const int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
int8x16_t vy0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x8_t vyGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
vy0123456789ABCDEF = vmaxq_s8(vy0123456789ABCDEF, voutput_min);
vyGHIJKLMN = vmax_s8(vyGHIJKLMN, vget_low_s8(voutput_min));
vy0123456789ABCDEF = vminq_s8(vy0123456789ABCDEF, voutput_max);
vyGHIJKLMN = vmin_s8(vyGHIJKLMN, vget_low_s8(voutput_max));
vst1q_s8(output, vy0123456789ABCDEF); output += 16;
vst1_s8(output, vyGHIJKLMN); output += 8;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
float32x4_t vx_lo = vld1q_f32(input); input += 4;
float32x4_t vx_hi = vld1q_f32(input); input += 4;
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
vx_lo = vaddq_f32(vx_lo, vmagic_bias);
vx_hi = vaddq_f32(vx_hi, vmagic_bias);
const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);
const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, vget_low_s8(voutput_min));
vy = vmin_s8(vy, vget_low_s8(voutput_max));
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
float32x4_t vx_lo = vld1q_f32(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
float32x4_t vx_hi = vld1q_f32(x_hi);
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
vx_lo = vaddq_f32(vx_lo, vmagic_bias);
vx_hi = vaddq_f32(vx_hi, vmagic_bias);
const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);
const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, vget_low_s8(voutput_min));
vy = vmin_s8(vy, vget_low_s8(voutput_max));
if (batch & (4 * sizeof(float))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(float))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 5,516 | 39.866667 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-neon-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__neon_x32(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vscale = vld1q_dup_f32(¶ms->neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon.magic_bias);
const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(¶ms->neon.magic_bias_less_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->neon.output_max);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
float32x4_t vxKLMN = vld1q_f32(input); input += 4;
float32x4_t vxOPQR = vld1q_f32(input); input += 4;
float32x4_t vxSTUV = vld1q_f32(input); input += 4;
vx0123 = vmulq_f32(vx0123, vscale);
vx4567 = vmulq_f32(vx4567, vscale);
vx89AB = vmulq_f32(vx89AB, vscale);
vxCDEF = vmulq_f32(vxCDEF, vscale);
vxGHIJ = vmulq_f32(vxGHIJ, vscale);
vxKLMN = vmulq_f32(vxKLMN, vscale);
vxOPQR = vmulq_f32(vxOPQR, vscale);
vxSTUV = vmulq_f32(vxSTUV, vscale);
vx0123 = vaddq_f32(vx0123, vmagic_bias);
vx4567 = vaddq_f32(vx4567, vmagic_bias);
vx89AB = vaddq_f32(vx89AB, vmagic_bias);
vxCDEF = vaddq_f32(vxCDEF, vmagic_bias);
vxGHIJ = vaddq_f32(vxGHIJ, vmagic_bias);
vxKLMN = vaddq_f32(vxKLMN, vmagic_bias);
vxOPQR = vaddq_f32(vxOPQR, vmagic_bias);
vxSTUV = vaddq_f32(vxSTUV, vmagic_bias);
const int32x4_t vacc0123 = vqsubq_s32(vreinterpretq_s32_f32(vx0123), vmagic_bias_less_zero_point);
const int32x4_t vacc4567 = vqsubq_s32(vreinterpretq_s32_f32(vx4567), vmagic_bias_less_zero_point);
const int32x4_t vacc89AB = vqsubq_s32(vreinterpretq_s32_f32(vx89AB), vmagic_bias_less_zero_point);
const int32x4_t vaccCDEF = vqsubq_s32(vreinterpretq_s32_f32(vxCDEF), vmagic_bias_less_zero_point);
const int32x4_t vaccGHIJ = vqsubq_s32(vreinterpretq_s32_f32(vxGHIJ), vmagic_bias_less_zero_point);
const int32x4_t vaccKLMN = vqsubq_s32(vreinterpretq_s32_f32(vxKLMN), vmagic_bias_less_zero_point);
const int32x4_t vaccOPQR = vqsubq_s32(vreinterpretq_s32_f32(vxOPQR), vmagic_bias_less_zero_point);
const int32x4_t vaccSTUV = vqsubq_s32(vreinterpretq_s32_f32(vxSTUV), vmagic_bias_less_zero_point);
const int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
const int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
const int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
const int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));
int8x16_t vy0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x16_t vyGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));
vy0123456789ABCDEF = vmaxq_s8(vy0123456789ABCDEF, voutput_min);
vyGHIJKLMNOPQRSTUV = vmaxq_s8(vyGHIJKLMNOPQRSTUV, voutput_min);
vy0123456789ABCDEF = vminq_s8(vy0123456789ABCDEF, voutput_max);
vyGHIJKLMNOPQRSTUV = vminq_s8(vyGHIJKLMNOPQRSTUV, voutput_max);
vst1q_s8(output, vy0123456789ABCDEF); output += 16;
vst1q_s8(output, vyGHIJKLMNOPQRSTUV); output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
float32x4_t vx_lo = vld1q_f32(input); input += 4;
float32x4_t vx_hi = vld1q_f32(input); input += 4;
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
vx_lo = vaddq_f32(vx_lo, vmagic_bias);
vx_hi = vaddq_f32(vx_hi, vmagic_bias);
const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);
const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, vget_low_s8(voutput_min));
vy = vmin_s8(vy, vget_low_s8(voutput_max));
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
float32x4_t vx_lo = vld1q_f32(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
float32x4_t vx_hi = vld1q_f32(x_hi);
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
vx_lo = vaddq_f32(vx_lo, vmagic_bias);
vx_hi = vaddq_f32(vx_hi, vmagic_bias);
const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);
const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, vget_low_s8(voutput_min));
vy = vmin_s8(vy, vget_low_s8(voutput_max));
if (batch & (4 * sizeof(float))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(float))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 6,161 | 41.791667 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__neon_x8(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vscale = vld1q_dup_f32(¶ms->neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon.magic_bias);
const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(¶ms->neon.magic_bias_less_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->neon.output_max);
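  // The x8 variant only ever clamps 8 lanes at a time, so output_min/output_max are
  // kept in 64-bit vectors (vld1_dup_s8) rather than the 128-bit vectors used by the
  // wider variants.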
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
float32x4_t vx_lo = vld1q_f32(input); input += 4;
float32x4_t vx_hi = vld1q_f32(input); input += 4;
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
vx_lo = vaddq_f32(vx_lo, vmagic_bias);
vx_hi = vaddq_f32(vx_hi, vmagic_bias);
const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);
const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, voutput_min);
vy = vmin_s8(vy, voutput_max);
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
float32x4_t vx_lo = vld1q_f32(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
float32x4_t vx_hi = vld1q_f32(x_hi);
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
vx_lo = vaddq_f32(vx_lo, vmagic_bias);
vx_hi = vaddq_f32(vx_hi, vmagic_bias);
const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);
const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, voutput_min);
vy = vmin_s8(vy, voutput_max);
if (batch & (4 * sizeof(float))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(float))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 3,123 | 33.711111 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-neonv8-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__neonv8_x16(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vscale = vld1q_dup_f32(¶ms->neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neonv8.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->neonv8.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->neonv8.output_max);
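  // The NEONv8 variant rounds with vcvtnq_s32_f32 (round-to-nearest, ties-to-even)
  // instead of the magic-bias trick used by the baseline NEON kernels, then adds the
  // output zero point with a saturating int16 addition before narrowing to int8.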
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
vx0123 = vmulq_f32(vx0123, vscale);
vx4567 = vmulq_f32(vx4567, vscale);
vx89AB = vmulq_f32(vx89AB, vscale);
vxCDEF = vmulq_f32(vxCDEF, vscale);
const int32x4_t vacc0123 = vcvtnq_s32_f32(vx0123);
const int32x4_t vacc4567 = vcvtnq_s32_f32(vx4567);
const int32x4_t vacc89AB = vcvtnq_s32_f32(vx89AB);
const int32x4_t vaccCDEF = vcvtnq_s32_f32(vxCDEF);
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
int8x16_t vy0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
vy0123456789ABCDEF = vmaxq_s8(vy0123456789ABCDEF, voutput_min);
vy0123456789ABCDEF = vminq_s8(vy0123456789ABCDEF, voutput_max);
vst1q_s8(output, vy0123456789ABCDEF); output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
float32x4_t vx_lo = vld1q_f32(input); input += 4;
float32x4_t vx_hi = vld1q_f32(input); input += 4;
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, vget_low_s8(voutput_min));
vy = vmin_s8(vy, vget_low_s8(voutput_max));
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
float32x4_t vx_lo = vld1q_f32(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
float32x4_t vx_hi = vld1q_f32(x_hi);
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, vget_low_s8(voutput_min));
vy = vmin_s8(vy, vget_low_s8(voutput_max));
if (batch & (4 * sizeof(float))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(float))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 4,096 | 34.626087 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-neonv8-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__neonv8_x24(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vscale = vld1q_dup_f32(¶ms->neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neonv8.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->neonv8.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->neonv8.output_max);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
float32x4_t vxKLMN = vld1q_f32(input); input += 4;
vx0123 = vmulq_f32(vx0123, vscale);
vx4567 = vmulq_f32(vx4567, vscale);
vx89AB = vmulq_f32(vx89AB, vscale);
vxCDEF = vmulq_f32(vxCDEF, vscale);
vxGHIJ = vmulq_f32(vxGHIJ, vscale);
vxKLMN = vmulq_f32(vxKLMN, vscale);
const int32x4_t vacc0123 = vcvtnq_s32_f32(vx0123);
const int32x4_t vacc4567 = vcvtnq_s32_f32(vx4567);
const int32x4_t vacc89AB = vcvtnq_s32_f32(vx89AB);
const int32x4_t vaccCDEF = vcvtnq_s32_f32(vxCDEF);
const int32x4_t vaccGHIJ = vcvtnq_s32_f32(vxGHIJ);
const int32x4_t vaccKLMN = vcvtnq_s32_f32(vxKLMN);
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
int8x16_t vy0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x8_t vyGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
vy0123456789ABCDEF = vmaxq_s8(vy0123456789ABCDEF, voutput_min);
vyGHIJKLMN = vmax_s8(vyGHIJKLMN, vget_low_s8(voutput_min));
vy0123456789ABCDEF = vminq_s8(vy0123456789ABCDEF, voutput_max);
vyGHIJKLMN = vmin_s8(vyGHIJKLMN, vget_low_s8(voutput_max));
vst1q_s8(output, vy0123456789ABCDEF); output += 16;
vst1_s8(output, vyGHIJKLMN); output += 8;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
float32x4_t vx_lo = vld1q_f32(input); input += 4;
float32x4_t vx_hi = vld1q_f32(input); input += 4;
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, vget_low_s8(voutput_min));
vy = vmin_s8(vy, vget_low_s8(voutput_max));
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
float32x4_t vx_lo = vld1q_f32(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
float32x4_t vx_hi = vld1q_f32(x_hi);
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, vget_low_s8(voutput_min));
vy = vmin_s8(vy, vget_low_s8(voutput_max));
if (batch & (4 * sizeof(float))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(float))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 4,774 | 36.598425 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-neonv8-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__neonv8_x32(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vscale = vld1q_dup_f32(¶ms->neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neonv8.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->neonv8.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->neonv8.output_max);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
float32x4_t vxKLMN = vld1q_f32(input); input += 4;
float32x4_t vxOPQR = vld1q_f32(input); input += 4;
float32x4_t vxSTUV = vld1q_f32(input); input += 4;
vx0123 = vmulq_f32(vx0123, vscale);
vx4567 = vmulq_f32(vx4567, vscale);
vx89AB = vmulq_f32(vx89AB, vscale);
vxCDEF = vmulq_f32(vxCDEF, vscale);
vxGHIJ = vmulq_f32(vxGHIJ, vscale);
vxKLMN = vmulq_f32(vxKLMN, vscale);
vxOPQR = vmulq_f32(vxOPQR, vscale);
vxSTUV = vmulq_f32(vxSTUV, vscale);
const int32x4_t vacc0123 = vcvtnq_s32_f32(vx0123);
const int32x4_t vacc4567 = vcvtnq_s32_f32(vx4567);
const int32x4_t vacc89AB = vcvtnq_s32_f32(vx89AB);
const int32x4_t vaccCDEF = vcvtnq_s32_f32(vxCDEF);
const int32x4_t vaccGHIJ = vcvtnq_s32_f32(vxGHIJ);
const int32x4_t vaccKLMN = vcvtnq_s32_f32(vxKLMN);
const int32x4_t vaccOPQR = vcvtnq_s32_f32(vxOPQR);
const int32x4_t vaccSTUV = vcvtnq_s32_f32(vxSTUV);
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
vaccOPQRSTUV = vqaddq_s16(vaccOPQRSTUV, voutput_zero_point);
int8x16_t vy0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x16_t vyGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));
vy0123456789ABCDEF = vmaxq_s8(vy0123456789ABCDEF, voutput_min);
vyGHIJKLMNOPQRSTUV = vmaxq_s8(vyGHIJKLMNOPQRSTUV, voutput_min);
vy0123456789ABCDEF = vminq_s8(vy0123456789ABCDEF, voutput_max);
vyGHIJKLMNOPQRSTUV = vminq_s8(vyGHIJKLMNOPQRSTUV, voutput_max);
vst1q_s8(output, vy0123456789ABCDEF); output += 16;
vst1q_s8(output, vyGHIJKLMNOPQRSTUV); output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
float32x4_t vx_lo = vld1q_f32(input); input += 4;
float32x4_t vx_hi = vld1q_f32(input); input += 4;
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, vget_low_s8(voutput_min));
vy = vmin_s8(vy, vget_low_s8(voutput_max));
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
float32x4_t vx_lo = vld1q_f32(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
float32x4_t vx_hi = vld1q_f32(x_hi);
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, vget_low_s8(voutput_min));
vy = vmin_s8(vy, vget_low_s8(voutput_max));
if (batch & (4 * sizeof(float))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(float))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 5,292 | 38.207407 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-neonv8-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/neonv8.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__neonv8_x8(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vscale = vld1q_dup_f32(¶ms->neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neonv8.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->neonv8.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->neonv8.output_max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
float32x4_t vx_lo = vld1q_f32(input); input += 4;
float32x4_t vx_hi = vld1q_f32(input); input += 4;
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, voutput_min);
vy = vmin_s8(vy, voutput_max);
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
float32x4_t vx_lo = vld1q_f32(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
float32x4_t vx_hi = vld1q_f32(x_hi);
vx_lo = vmulq_f32(vx_lo, vscale);
vx_hi = vmulq_f32(vx_hi, vscale);
const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
vy = vmax_s8(vy, voutput_min);
vy = vmin_s8(vy, voutput_max);
if (batch & (4 * sizeof(float))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(float))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 2,762 | 31.505882 | 91 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-scalar-fmagic-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__scalar_fmagic_x1(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_zero_point = params->scalar_fmagic.magic_bias_less_zero_point;
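  // Scalar "fmagic" variant: the output range is clamped in the float domain, against
  // the min/max with the zero point subtracted, before the magic-bias addition, so no
  // further clamping is needed after the bit reinterpretation.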
do {
float vx = *input++;
vx *= vscale;
vx = math_max_f32(vx, voutput_min_less_zero_point);
vx = math_min_f32(vx, voutput_max_less_zero_point);
vx += vmagic_bias;
int32_t vy = (int32_t) float_as_uint32(vx);
vy -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vy;
batch -= sizeof(float);
} while (batch != 0);
}
| 1,446 | 28.530612 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-scalar-fmagic-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__scalar_fmagic_x2(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_zero_point = params->scalar_fmagic.magic_bias_less_zero_point;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
input += 2;
vx0 *= vscale;
vx1 *= vscale;
vx0 = math_max_f32(vx0, voutput_min_less_zero_point);
vx1 = math_max_f32(vx1, voutput_min_less_zero_point);
vx0 = math_min_f32(vx0, voutput_max_less_zero_point);
vx1 = math_min_f32(vx1, voutput_max_less_zero_point);
vx0 += vmagic_bias;
vx1 += vmagic_bias;
int32_t vy0 = (int32_t) float_as_uint32(vx0);
int32_t vy1 = (int32_t) float_as_uint32(vx1);
vy0 -= vmagic_bias_less_zero_point;
vy1 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vy0;
output[1] = (int8_t) vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
float vx = *input;
vx *= vscale;
vx = math_max_f32(vx, voutput_min_less_zero_point);
vx = math_min_f32(vx, voutput_max_less_zero_point);
vx += vmagic_bias;
int32_t vy = (int32_t) float_as_uint32(vx);
vy -= vmagic_bias_less_zero_point;
*output = (int8_t) vy;
}
}
| 2,139 | 27.918919 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-scalar-fmagic-x3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__scalar_fmagic_x3(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_zero_point = params->scalar_fmagic.magic_bias_less_zero_point;
for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
input += 3;
vx0 *= vscale;
vx1 *= vscale;
vx2 *= vscale;
vx0 = math_max_f32(vx0, voutput_min_less_zero_point);
vx1 = math_max_f32(vx1, voutput_min_less_zero_point);
vx2 = math_max_f32(vx2, voutput_min_less_zero_point);
vx0 = math_min_f32(vx0, voutput_max_less_zero_point);
vx1 = math_min_f32(vx1, voutput_max_less_zero_point);
vx2 = math_min_f32(vx2, voutput_max_less_zero_point);
vx0 += vmagic_bias;
vx1 += vmagic_bias;
vx2 += vmagic_bias;
int32_t vy0 = (int32_t) float_as_uint32(vx0);
int32_t vy1 = (int32_t) float_as_uint32(vx1);
int32_t vy2 = (int32_t) float_as_uint32(vx2);
vy0 -= vmagic_bias_less_zero_point;
vy1 -= vmagic_bias_less_zero_point;
vy2 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vy0;
output[1] = (int8_t) vy1;
output[2] = (int8_t) vy2;
output += 3;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
vx *= vscale;
vx = math_max_f32(vx, voutput_min_less_zero_point);
vx = math_min_f32(vx, voutput_max_less_zero_point);
vx += vmagic_bias;
int32_t vy = (int32_t) float_as_uint32(vx);
vy -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,530 | 28.430233 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-scalar-fmagic-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__scalar_fmagic_x4(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_zero_point = params->scalar_fmagic.magic_bias_less_zero_point;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
input += 4;
vx0 *= vscale;
vx1 *= vscale;
vx2 *= vscale;
vx3 *= vscale;
vx0 = math_max_f32(vx0, voutput_min_less_zero_point);
vx1 = math_max_f32(vx1, voutput_min_less_zero_point);
vx2 = math_max_f32(vx2, voutput_min_less_zero_point);
vx3 = math_max_f32(vx3, voutput_min_less_zero_point);
vx0 = math_min_f32(vx0, voutput_max_less_zero_point);
vx1 = math_min_f32(vx1, voutput_max_less_zero_point);
vx2 = math_min_f32(vx2, voutput_max_less_zero_point);
vx3 = math_min_f32(vx3, voutput_max_less_zero_point);
vx0 += vmagic_bias;
vx1 += vmagic_bias;
vx2 += vmagic_bias;
vx3 += vmagic_bias;
int32_t vy0 = (int32_t) float_as_uint32(vx0);
int32_t vy1 = (int32_t) float_as_uint32(vx1);
int32_t vy2 = (int32_t) float_as_uint32(vx2);
int32_t vy3 = (int32_t) float_as_uint32(vx3);
vy0 -= vmagic_bias_less_zero_point;
vy1 -= vmagic_bias_less_zero_point;
vy2 -= vmagic_bias_less_zero_point;
vy3 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vy0;
output[1] = (int8_t) vy1;
output[2] = (int8_t) vy2;
output[3] = (int8_t) vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
vx *= vscale;
vx = math_max_f32(vx, voutput_min_less_zero_point);
vx = math_min_f32(vx, voutput_max_less_zero_point);
vx += vmagic_bias;
int32_t vy = (int32_t) float_as_uint32(vx);
vy -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,835 | 29.170213 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-scalar-imagic-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-imagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__scalar_imagic_x1(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_imagic.scale;
const float vmagic_bias = params->scalar_imagic.magic_bias;
const int32_t vmagic_min = params->scalar_imagic.magic_min;
const int32_t vmagic_max = params->scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->scalar_imagic.magic_bias_less_zero_point;
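  // Scalar "imagic" variant: rounding still uses the magic-bias addition, but the
  // clamping happens in the integer domain against magic_min/magic_max (the
  // magic-biased encodings of the output min/max) after the bit reinterpretation.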
do {
float vx = *input++;
vx *= vscale;
vx += vmagic_bias;
int32_t vy = (int32_t) float_as_uint32(vx);
vy = math_max_s32(vy, vmagic_min);
vy = math_min_s32(vy, vmagic_max);
vy -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vy;
batch -= sizeof(float);
} while (batch != 0);
}
| 1,348 | 26.530612 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-scalar-imagic-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-imagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__scalar_imagic_x2(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_imagic.scale;
const float vmagic_bias = params->scalar_imagic.magic_bias;
const int32_t vmagic_min = params->scalar_imagic.magic_min;
const int32_t vmagic_max = params->scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->scalar_imagic.magic_bias_less_zero_point;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
input += 2;
vx0 *= vscale;
vx1 *= vscale;
vx0 += vmagic_bias;
vx1 += vmagic_bias;
int32_t vy0 = (int32_t) float_as_uint32(vx0);
int32_t vy1 = (int32_t) float_as_uint32(vx1);
vy0 = math_max_s32(vy0, vmagic_min);
vy1 = math_max_s32(vy1, vmagic_min);
vy0 = math_min_s32(vy0, vmagic_max);
vy1 = math_min_s32(vy1, vmagic_max);
vy0 -= vmagic_bias_less_zero_point;
vy1 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vy0;
output[1] = (int8_t) vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
float vx = *input;
vx *= vscale;
vx += vmagic_bias;
int32_t vy = (int32_t) float_as_uint32(vx);
vy = math_max_s32(vy, vmagic_min);
vy = math_min_s32(vy, vmagic_max);
vy -= vmagic_bias_less_zero_point;
*output = (int8_t) vy;
}
}
| 1,973 | 25.675676 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-scalar-imagic-x3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-imagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__scalar_imagic_x3(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_imagic.scale;
const float vmagic_bias = params->scalar_imagic.magic_bias;
const int32_t vmagic_min = params->scalar_imagic.magic_min;
const int32_t vmagic_max = params->scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->scalar_imagic.magic_bias_less_zero_point;
for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
input += 3;
vx0 *= vscale;
vx1 *= vscale;
vx2 *= vscale;
vx0 += vmagic_bias;
vx1 += vmagic_bias;
vx2 += vmagic_bias;
int32_t vy0 = (int32_t) float_as_uint32(vx0);
int32_t vy1 = (int32_t) float_as_uint32(vx1);
int32_t vy2 = (int32_t) float_as_uint32(vx2);
vy0 = math_max_s32(vy0, vmagic_min);
vy1 = math_max_s32(vy1, vmagic_min);
vy2 = math_max_s32(vy2, vmagic_min);
vy0 = math_min_s32(vy0, vmagic_max);
vy1 = math_min_s32(vy1, vmagic_max);
vy2 = math_min_s32(vy2, vmagic_max);
vy0 -= vmagic_bias_less_zero_point;
vy1 -= vmagic_bias_less_zero_point;
vy2 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vy0;
output[1] = (int8_t) vy1;
output[2] = (int8_t) vy2;
output += 3;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
vx *= vscale;
vx += vmagic_bias;
int32_t vy = (int32_t) float_as_uint32(vx);
vy = math_max_s32(vy, vmagic_min);
vy = math_min_s32(vy, vmagic_max);
vy -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,330 | 26.104651 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-scalar-imagic-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-imagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__scalar_imagic_x4(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_imagic.scale;
const float vmagic_bias = params->scalar_imagic.magic_bias;
const int32_t vmagic_min = params->scalar_imagic.magic_min;
const int32_t vmagic_max = params->scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->scalar_imagic.magic_bias_less_zero_point;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
input += 4;
vx0 *= vscale;
vx1 *= vscale;
vx2 *= vscale;
vx3 *= vscale;
vx0 += vmagic_bias;
vx1 += vmagic_bias;
vx2 += vmagic_bias;
vx3 += vmagic_bias;
int32_t vy0 = (int32_t) float_as_uint32(vx0);
int32_t vy1 = (int32_t) float_as_uint32(vx1);
int32_t vy2 = (int32_t) float_as_uint32(vx2);
int32_t vy3 = (int32_t) float_as_uint32(vx3);
vy0 = math_max_s32(vy0, vmagic_min);
vy1 = math_max_s32(vy1, vmagic_min);
vy2 = math_max_s32(vy2, vmagic_min);
vy3 = math_max_s32(vy3, vmagic_min);
vy0 = math_min_s32(vy0, vmagic_max);
vy1 = math_min_s32(vy1, vmagic_max);
vy2 = math_min_s32(vy2, vmagic_max);
vy3 = math_min_s32(vy3, vmagic_max);
vy0 -= vmagic_bias_less_zero_point;
vy1 -= vmagic_bias_less_zero_point;
vy2 -= vmagic_bias_less_zero_point;
vy3 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vy0;
output[1] = (int8_t) vy1;
output[2] = (int8_t) vy2;
output[3] = (int8_t) vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
vx *= vscale;
vx += vmagic_bias;
int32_t vy = (int32_t) float_as_uint32(vx);
vy = math_max_s32(vy, vmagic_min);
vy = math_min_s32(vy, vmagic_max);
vy -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,601 | 26.680851 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-scalar-lrintf-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-lrintf.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__scalar_lrintf_x1(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
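  // Scalar "lrintf" variant: rounding is delegated to lrintf(), which rounds to
  // nearest in the default floating-point environment; the range is clamped in the
  // float domain and the zero point is added afterwards as an integer.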
do {
float vx = *input++;
vx *= vscale;
vx = math_max_f32(vx, voutput_min_less_zero_point);
vx = math_min_f32(vx, voutput_max_less_zero_point);
int32_t vy = (int32_t) lrintf(vx);
vy += voutput_zero_point;
*output++ = (int8_t) vy;
batch -= sizeof(float);
} while (batch != 0);
}
| 1,343 | 27 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-scalar-lrintf-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-lrintf.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__scalar_lrintf_x2(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
input += 2;
vx0 *= vscale;
vx1 *= vscale;
vx0 = math_max_f32(vx0, voutput_min_less_zero_point);
vx1 = math_max_f32(vx1, voutput_min_less_zero_point);
vx0 = math_min_f32(vx0, voutput_max_less_zero_point);
vx1 = math_min_f32(vx1, voutput_max_less_zero_point);
int32_t vy0 = (int32_t) lrintf(vx0);
int32_t vy1 = (int32_t) lrintf(vx1);
vy0 += voutput_zero_point;
vy1 += voutput_zero_point;
output[0] = (int8_t) vy0;
output[1] = (int8_t) vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
float vx = *input;
vx *= vscale;
vx = math_max_f32(vx, voutput_min_less_zero_point);
vx = math_min_f32(vx, voutput_max_less_zero_point);
int32_t vy = (int32_t) lrintf(vx);
vy += voutput_zero_point;
*output = (int8_t) vy;
}
}
| 1,951 | 26.885714 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-scalar-lrintf-x3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-lrintf.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__scalar_lrintf_x3(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
input += 3;
vx0 *= vscale;
vx1 *= vscale;
vx2 *= vscale;
vx0 = math_max_f32(vx0, voutput_min_less_zero_point);
vx1 = math_max_f32(vx1, voutput_min_less_zero_point);
vx2 = math_max_f32(vx2, voutput_min_less_zero_point);
vx0 = math_min_f32(vx0, voutput_max_less_zero_point);
vx1 = math_min_f32(vx1, voutput_max_less_zero_point);
vx2 = math_min_f32(vx2, voutput_max_less_zero_point);
int32_t vy0 = (int32_t) lrintf(vx0);
int32_t vy1 = (int32_t) lrintf(vx1);
int32_t vy2 = (int32_t) lrintf(vx2);
vy0 += voutput_zero_point;
vy1 += voutput_zero_point;
vy2 += voutput_zero_point;
output[0] = (int8_t) vy0;
output[1] = (int8_t) vy1;
output[2] = (int8_t) vy2;
output += 3;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
vx *= vscale;
vx = math_max_f32(vx, voutput_min_less_zero_point);
vx = math_min_f32(vx, voutput_max_less_zero_point);
int32_t vy = (int32_t) lrintf(vx);
vy += voutput_zero_point;
*output++ = (int8_t) vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,298 | 27.382716 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-scalar-lrintf-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-lrintf.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__scalar_lrintf_x4(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar_lrintf.output_zero_point;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
input += 4;
vx0 *= vscale;
vx1 *= vscale;
vx2 *= vscale;
vx3 *= vscale;
vx0 = math_max_f32(vx0, voutput_min_less_zero_point);
vx1 = math_max_f32(vx1, voutput_min_less_zero_point);
vx2 = math_max_f32(vx2, voutput_min_less_zero_point);
vx3 = math_max_f32(vx3, voutput_min_less_zero_point);
vx0 = math_min_f32(vx0, voutput_max_less_zero_point);
vx1 = math_min_f32(vx1, voutput_max_less_zero_point);
vx2 = math_min_f32(vx2, voutput_max_less_zero_point);
vx3 = math_min_f32(vx3, voutput_max_less_zero_point);
int32_t vy0 = (int32_t) lrintf(vx0);
int32_t vy1 = (int32_t) lrintf(vx1);
int32_t vy2 = (int32_t) lrintf(vx2);
int32_t vy3 = (int32_t) lrintf(vx3);
vy0 += voutput_zero_point;
vy1 += voutput_zero_point;
vy2 += voutput_zero_point;
vy3 += voutput_zero_point;
output[0] = (int8_t) vy0;
output[1] = (int8_t) vy1;
output[2] = (int8_t) vy2;
output[3] = (int8_t) vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
vx *= vscale;
vx = math_max_f32(vx, voutput_min_less_zero_point);
vx = math_min_f32(vx, voutput_max_less_zero_point);
int32_t vy = (int32_t) lrintf(vx);
vy += voutput_zero_point;
*output++ = (int8_t) vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,561 | 28.113636 | 93 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-sse2-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__sse2_x16(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vscale = _mm_load_ps(params->sse2.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->sse2.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
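  // SSE2 variant: _mm_cvtps_epi32 rounds to nearest under the default MXCSR mode.
  // Only the upper bound is clamped in the float domain; out-of-range negative inputs
  // convert to INT32_MIN and are brought back into range by the saturating packs/adds
  // and the final _mm_max_epi16 against output_min.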
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
__m128 vx89AB = _mm_loadu_ps(input + 8);
__m128 vxCDEF = _mm_loadu_ps(input + 12);
input += 16;
vx0123 = _mm_mul_ps(vx0123, vscale);
vx4567 = _mm_mul_ps(vx4567, vscale);
vx89AB = _mm_mul_ps(vx89AB, vscale);
vxCDEF = _mm_mul_ps(vxCDEF, vscale);
vx0123 = _mm_min_ps(vx0123, voutput_max_less_zero_point);
vx4567 = _mm_min_ps(vx4567, voutput_max_less_zero_point);
vx89AB = _mm_min_ps(vx89AB, voutput_max_less_zero_point);
vxCDEF = _mm_min_ps(vxCDEF, voutput_max_less_zero_point);
const __m128i vy0123 = _mm_cvtps_epi32(vx0123);
const __m128i vy4567 = _mm_cvtps_epi32(vx4567);
const __m128i vy89AB = _mm_cvtps_epi32(vx89AB);
const __m128i vyCDEF = _mm_cvtps_epi32(vxCDEF);
__m128i vy01234567 = _mm_packs_epi32(vy0123, vy4567);
__m128i vy89ABCDEF = _mm_packs_epi32(vy89AB, vyCDEF);
vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);
vy01234567 = _mm_max_epi16(vy01234567, voutput_min);
vy89ABCDEF = _mm_max_epi16(vy89ABCDEF, voutput_min);
__m128i vy0123456789ABCDEF = _mm_packs_epi16(vy01234567, vy89ABCDEF);
_mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m128 vx_lo = _mm_loadu_ps(input);
__m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_max_epi16(vy, voutput_min);
vy = _mm_packs_epi16(vy, vy);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
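    // 1-7 leftover elements: load two float quads (the high quad aliases the low quad
    // when fewer than 4 elements remain; XNN_OOB_READS permits reading past the tail),
    // convert them as above, and store only the valid bytes selected by the batch bits.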
__m128 vx_lo = _mm_loadu_ps(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
__m128 vx_hi = _mm_loadu_ps(x_hi);
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_max_epi16(vy, voutput_min);
vy = _mm_packs_epi16(vy, vy);
if (batch & (4 * sizeof(float))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
{
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(float))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
output += 2;
vy_lo >>= 16;
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) vy_lo;
}
}
}
}
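// Illustrative call (the params struct is assumed to be pre-initialized for the sse2
// variant of xnn_f32_qs8_cvt_params elsewhere):
//   xnn_f32_qs8_vcvt_ukernel__sse2_x16(n * sizeof(float), src, dst, &params);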
| 4,329 | 32.053435 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-sse2-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__sse2_x24(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vscale = _mm_load_ps(params->sse2.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->sse2.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
__m128 vx89AB = _mm_loadu_ps(input + 8);
__m128 vxCDEF = _mm_loadu_ps(input + 12);
__m128 vxGHIJ = _mm_loadu_ps(input + 16);
__m128 vxKLMN = _mm_loadu_ps(input + 20);
input += 24;
vx0123 = _mm_mul_ps(vx0123, vscale);
vx4567 = _mm_mul_ps(vx4567, vscale);
vx89AB = _mm_mul_ps(vx89AB, vscale);
vxCDEF = _mm_mul_ps(vxCDEF, vscale);
vxGHIJ = _mm_mul_ps(vxGHIJ, vscale);
vxKLMN = _mm_mul_ps(vxKLMN, vscale);
vx0123 = _mm_min_ps(vx0123, voutput_max_less_zero_point);
vx4567 = _mm_min_ps(vx4567, voutput_max_less_zero_point);
vx89AB = _mm_min_ps(vx89AB, voutput_max_less_zero_point);
vxCDEF = _mm_min_ps(vxCDEF, voutput_max_less_zero_point);
vxGHIJ = _mm_min_ps(vxGHIJ, voutput_max_less_zero_point);
vxKLMN = _mm_min_ps(vxKLMN, voutput_max_less_zero_point);
const __m128i vy0123 = _mm_cvtps_epi32(vx0123);
const __m128i vy4567 = _mm_cvtps_epi32(vx4567);
const __m128i vy89AB = _mm_cvtps_epi32(vx89AB);
const __m128i vyCDEF = _mm_cvtps_epi32(vxCDEF);
const __m128i vyGHIJ = _mm_cvtps_epi32(vxGHIJ);
const __m128i vyKLMN = _mm_cvtps_epi32(vxKLMN);
__m128i vy01234567 = _mm_packs_epi32(vy0123, vy4567);
__m128i vy89ABCDEF = _mm_packs_epi32(vy89AB, vyCDEF);
__m128i vyGHIJKLMN = _mm_packs_epi32(vyGHIJ, vyKLMN);
vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);
vyGHIJKLMN = _mm_adds_epi16(vyGHIJKLMN, voutput_zero_point);
vy01234567 = _mm_max_epi16(vy01234567, voutput_min);
vy89ABCDEF = _mm_max_epi16(vy89ABCDEF, voutput_min);
vyGHIJKLMN = _mm_max_epi16(vyGHIJKLMN, voutput_min);
__m128i vy0123456789ABCDEF = _mm_packs_epi16(vy01234567, vy89ABCDEF);
vyGHIJKLMN = _mm_packs_epi16(vyGHIJKLMN, vyGHIJKLMN);
_mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), vyGHIJKLMN);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m128 vx_lo = _mm_loadu_ps(input);
__m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_max_epi16(vy, voutput_min);
vy = _mm_packs_epi16(vy, vy);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx_lo = _mm_loadu_ps(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
__m128 vx_hi = _mm_loadu_ps(x_hi);
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_max_epi16(vy, voutput_min);
vy = _mm_packs_epi16(vy, vy);
if (batch & (4 * sizeof(float))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
{
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(float))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
output += 2;
vy_lo >>= 16;
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) vy_lo;
}
}
}
}
| 5,029 | 33.930556 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-sse2-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__sse2_x32(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vscale = _mm_load_ps(params->sse2.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->sse2.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
__m128 vx89AB = _mm_loadu_ps(input + 8);
__m128 vxCDEF = _mm_loadu_ps(input + 12);
__m128 vxGHIJ = _mm_loadu_ps(input + 16);
__m128 vxKLMN = _mm_loadu_ps(input + 20);
__m128 vxOPQR = _mm_loadu_ps(input + 24);
__m128 vxSTUV = _mm_loadu_ps(input + 28);
input += 32;
vx0123 = _mm_mul_ps(vx0123, vscale);
vx4567 = _mm_mul_ps(vx4567, vscale);
vx89AB = _mm_mul_ps(vx89AB, vscale);
vxCDEF = _mm_mul_ps(vxCDEF, vscale);
vxGHIJ = _mm_mul_ps(vxGHIJ, vscale);
vxKLMN = _mm_mul_ps(vxKLMN, vscale);
vxOPQR = _mm_mul_ps(vxOPQR, vscale);
vxSTUV = _mm_mul_ps(vxSTUV, vscale);
vx0123 = _mm_min_ps(vx0123, voutput_max_less_zero_point);
vx4567 = _mm_min_ps(vx4567, voutput_max_less_zero_point);
vx89AB = _mm_min_ps(vx89AB, voutput_max_less_zero_point);
vxCDEF = _mm_min_ps(vxCDEF, voutput_max_less_zero_point);
vxGHIJ = _mm_min_ps(vxGHIJ, voutput_max_less_zero_point);
vxKLMN = _mm_min_ps(vxKLMN, voutput_max_less_zero_point);
vxOPQR = _mm_min_ps(vxOPQR, voutput_max_less_zero_point);
vxSTUV = _mm_min_ps(vxSTUV, voutput_max_less_zero_point);
const __m128i vy0123 = _mm_cvtps_epi32(vx0123);
const __m128i vy4567 = _mm_cvtps_epi32(vx4567);
const __m128i vy89AB = _mm_cvtps_epi32(vx89AB);
const __m128i vyCDEF = _mm_cvtps_epi32(vxCDEF);
const __m128i vyGHIJ = _mm_cvtps_epi32(vxGHIJ);
const __m128i vyKLMN = _mm_cvtps_epi32(vxKLMN);
const __m128i vyOPQR = _mm_cvtps_epi32(vxOPQR);
const __m128i vySTUV = _mm_cvtps_epi32(vxSTUV);
__m128i vy01234567 = _mm_packs_epi32(vy0123, vy4567);
__m128i vy89ABCDEF = _mm_packs_epi32(vy89AB, vyCDEF);
__m128i vyGHIJKLMN = _mm_packs_epi32(vyGHIJ, vyKLMN);
__m128i vyOPQRSTUV = _mm_packs_epi32(vyOPQR, vySTUV);
vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);
vyGHIJKLMN = _mm_adds_epi16(vyGHIJKLMN, voutput_zero_point);
vyOPQRSTUV = _mm_adds_epi16(vyOPQRSTUV, voutput_zero_point);
vy01234567 = _mm_max_epi16(vy01234567, voutput_min);
vy89ABCDEF = _mm_max_epi16(vy89ABCDEF, voutput_min);
vyGHIJKLMN = _mm_max_epi16(vyGHIJKLMN, voutput_min);
vyOPQRSTUV = _mm_max_epi16(vyOPQRSTUV, voutput_min);
__m128i vy0123456789ABCDEF = _mm_packs_epi16(vy01234567, vy89ABCDEF);
__m128i vyGHIJKLMNOPQRSTUV = _mm_packs_epi16(vyGHIJKLMN, vyOPQRSTUV);
_mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), vyGHIJKLMNOPQRSTUV);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m128 vx_lo = _mm_loadu_ps(input);
__m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_max_epi16(vy, voutput_min);
vy = _mm_packs_epi16(vy, vy);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx_lo = _mm_loadu_ps(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
__m128 vx_hi = _mm_loadu_ps(x_hi);
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_max_epi16(vy, voutput_min);
vy = _mm_packs_epi16(vy, vy);
if (batch & (4 * sizeof(float))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
{
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(float))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
output += 2;
vy_lo >>= 16;
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) vy_lo;
}
}
}
}
| 5,635 | 35.36129 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-sse2-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__sse2_x8(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vscale = _mm_load_ps(params->sse2.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->sse2.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m128 vx_lo = _mm_loadu_ps(input);
__m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_max_epi16(vy, voutput_min);
vy = _mm_packs_epi16(vy, vy);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx_lo = _mm_loadu_ps(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
__m128 vx_hi = _mm_loadu_ps(x_hi);
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_max_epi16(vy, voutput_min);
vy = _mm_packs_epi16(vy, vy);
if (batch & (4 * sizeof(float))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
{
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(float))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
output += 2;
vy_lo >>= 16;
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) vy_lo;
}
}
}
}
| 2,919 | 30.06383 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-sse41-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__sse41_x16(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vscale = _mm_load_ps(params->sse4.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->sse4.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
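  // Same pipeline as the SSE2 kernels, except SSE4.1 provides _mm_max_epi8, so the lower
  // clamp is applied directly to the packed int8 values after narrowing.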
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
__m128 vx89AB = _mm_loadu_ps(input + 8);
__m128 vxCDEF = _mm_loadu_ps(input + 12);
input += 16;
vx0123 = _mm_mul_ps(vx0123, vscale);
vx4567 = _mm_mul_ps(vx4567, vscale);
vx89AB = _mm_mul_ps(vx89AB, vscale);
vxCDEF = _mm_mul_ps(vxCDEF, vscale);
vx0123 = _mm_min_ps(vx0123, voutput_max_less_zero_point);
vx4567 = _mm_min_ps(vx4567, voutput_max_less_zero_point);
vx89AB = _mm_min_ps(vx89AB, voutput_max_less_zero_point);
vxCDEF = _mm_min_ps(vxCDEF, voutput_max_less_zero_point);
const __m128i vy0123 = _mm_cvtps_epi32(vx0123);
const __m128i vy4567 = _mm_cvtps_epi32(vx4567);
const __m128i vy89AB = _mm_cvtps_epi32(vx89AB);
const __m128i vyCDEF = _mm_cvtps_epi32(vxCDEF);
__m128i vy01234567 = _mm_packs_epi32(vy0123, vy4567);
__m128i vy89ABCDEF = _mm_packs_epi32(vy89AB, vyCDEF);
vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);
__m128i vy0123456789ABCDEF = _mm_packs_epi16(vy01234567, vy89ABCDEF);
vy0123456789ABCDEF = _mm_max_epi8(vy0123456789ABCDEF, voutput_min);
_mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m128 vx_lo = _mm_loadu_ps(input);
__m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
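    // 1-7 leftover elements: the same overlapping-quad trick as the SSE2 tail, but the
    // sub-dword stores pull bytes out with extract instructions instead of scalar shifts.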
__m128 vx_lo = _mm_loadu_ps(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
__m128 vx_hi = _mm_loadu_ps(x_hi);
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
if (batch & (4 * sizeof(float))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vy, 0));
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 4,253 | 32.496063 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-sse41-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__sse41_x24(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vscale = _mm_load_ps(params->sse4.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->sse4.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
__m128 vx89AB = _mm_loadu_ps(input + 8);
__m128 vxCDEF = _mm_loadu_ps(input + 12);
__m128 vxGHIJ = _mm_loadu_ps(input + 16);
__m128 vxKLMN = _mm_loadu_ps(input + 20);
input += 24;
vx0123 = _mm_mul_ps(vx0123, vscale);
vx4567 = _mm_mul_ps(vx4567, vscale);
vx89AB = _mm_mul_ps(vx89AB, vscale);
vxCDEF = _mm_mul_ps(vxCDEF, vscale);
vxGHIJ = _mm_mul_ps(vxGHIJ, vscale);
vxKLMN = _mm_mul_ps(vxKLMN, vscale);
vx0123 = _mm_min_ps(vx0123, voutput_max_less_zero_point);
vx4567 = _mm_min_ps(vx4567, voutput_max_less_zero_point);
vx89AB = _mm_min_ps(vx89AB, voutput_max_less_zero_point);
vxCDEF = _mm_min_ps(vxCDEF, voutput_max_less_zero_point);
vxGHIJ = _mm_min_ps(vxGHIJ, voutput_max_less_zero_point);
vxKLMN = _mm_min_ps(vxKLMN, voutput_max_less_zero_point);
const __m128i vy0123 = _mm_cvtps_epi32(vx0123);
const __m128i vy4567 = _mm_cvtps_epi32(vx4567);
const __m128i vy89AB = _mm_cvtps_epi32(vx89AB);
const __m128i vyCDEF = _mm_cvtps_epi32(vxCDEF);
const __m128i vyGHIJ = _mm_cvtps_epi32(vxGHIJ);
const __m128i vyKLMN = _mm_cvtps_epi32(vxKLMN);
__m128i vy01234567 = _mm_packs_epi32(vy0123, vy4567);
__m128i vy89ABCDEF = _mm_packs_epi32(vy89AB, vyCDEF);
__m128i vyGHIJKLMN = _mm_packs_epi32(vyGHIJ, vyKLMN);
vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);
vyGHIJKLMN = _mm_adds_epi16(vyGHIJKLMN, voutput_zero_point);
__m128i vy0123456789ABCDEF = _mm_packs_epi16(vy01234567, vy89ABCDEF);
vyGHIJKLMN = _mm_packs_epi16(vyGHIJKLMN, vyGHIJKLMN);
vy0123456789ABCDEF = _mm_max_epi8(vy0123456789ABCDEF, voutput_min);
vyGHIJKLMN = _mm_max_epi8(vyGHIJKLMN, voutput_min);
_mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), vyGHIJKLMN);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m128 vx_lo = _mm_loadu_ps(input);
__m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx_lo = _mm_loadu_ps(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
__m128 vx_hi = _mm_loadu_ps(x_hi);
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
if (batch & (4 * sizeof(float))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vy, 0));
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 4,952 | 34.378571 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-sse41-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__sse41_x32(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vscale = _mm_load_ps(params->sse4.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->sse4.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
__m128 vx89AB = _mm_loadu_ps(input + 8);
__m128 vxCDEF = _mm_loadu_ps(input + 12);
__m128 vxGHIJ = _mm_loadu_ps(input + 16);
__m128 vxKLMN = _mm_loadu_ps(input + 20);
__m128 vxOPQR = _mm_loadu_ps(input + 24);
__m128 vxSTUV = _mm_loadu_ps(input + 28);
input += 32;
vx0123 = _mm_mul_ps(vx0123, vscale);
vx4567 = _mm_mul_ps(vx4567, vscale);
vx89AB = _mm_mul_ps(vx89AB, vscale);
vxCDEF = _mm_mul_ps(vxCDEF, vscale);
vxGHIJ = _mm_mul_ps(vxGHIJ, vscale);
vxKLMN = _mm_mul_ps(vxKLMN, vscale);
vxOPQR = _mm_mul_ps(vxOPQR, vscale);
vxSTUV = _mm_mul_ps(vxSTUV, vscale);
vx0123 = _mm_min_ps(vx0123, voutput_max_less_zero_point);
vx4567 = _mm_min_ps(vx4567, voutput_max_less_zero_point);
vx89AB = _mm_min_ps(vx89AB, voutput_max_less_zero_point);
vxCDEF = _mm_min_ps(vxCDEF, voutput_max_less_zero_point);
vxGHIJ = _mm_min_ps(vxGHIJ, voutput_max_less_zero_point);
vxKLMN = _mm_min_ps(vxKLMN, voutput_max_less_zero_point);
vxOPQR = _mm_min_ps(vxOPQR, voutput_max_less_zero_point);
vxSTUV = _mm_min_ps(vxSTUV, voutput_max_less_zero_point);
const __m128i vy0123 = _mm_cvtps_epi32(vx0123);
const __m128i vy4567 = _mm_cvtps_epi32(vx4567);
const __m128i vy89AB = _mm_cvtps_epi32(vx89AB);
const __m128i vyCDEF = _mm_cvtps_epi32(vxCDEF);
const __m128i vyGHIJ = _mm_cvtps_epi32(vxGHIJ);
const __m128i vyKLMN = _mm_cvtps_epi32(vxKLMN);
const __m128i vyOPQR = _mm_cvtps_epi32(vxOPQR);
const __m128i vySTUV = _mm_cvtps_epi32(vxSTUV);
__m128i vy01234567 = _mm_packs_epi32(vy0123, vy4567);
__m128i vy89ABCDEF = _mm_packs_epi32(vy89AB, vyCDEF);
__m128i vyGHIJKLMN = _mm_packs_epi32(vyGHIJ, vyKLMN);
__m128i vyOPQRSTUV = _mm_packs_epi32(vyOPQR, vySTUV);
vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);
vyGHIJKLMN = _mm_adds_epi16(vyGHIJKLMN, voutput_zero_point);
vyOPQRSTUV = _mm_adds_epi16(vyOPQRSTUV, voutput_zero_point);
__m128i vy0123456789ABCDEF = _mm_packs_epi16(vy01234567, vy89ABCDEF);
__m128i vyGHIJKLMNOPQRSTUV = _mm_packs_epi16(vyGHIJKLMN, vyOPQRSTUV);
vy0123456789ABCDEF = _mm_max_epi8(vy0123456789ABCDEF, voutput_min);
vyGHIJKLMNOPQRSTUV = _mm_max_epi8(vyGHIJKLMNOPQRSTUV, voutput_min);
_mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), vyGHIJKLMNOPQRSTUV);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m128 vx_lo = _mm_loadu_ps(input);
__m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx_lo = _mm_loadu_ps(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
__m128 vx_hi = _mm_loadu_ps(x_hi);
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
if (batch & (4 * sizeof(float))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vy, 0));
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 5,517 | 35.786667 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-sse41-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/sse.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__sse41_x8(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vscale = _mm_load_ps(params->sse4.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->sse4.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m128 vx_lo = _mm_loadu_ps(input);
__m128 vx_hi = _mm_loadu_ps(input + 4);
input += 8;
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx_lo = _mm_loadu_ps(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
__m128 vx_hi = _mm_loadu_ps(x_hi);
vx_lo = _mm_mul_ps(vx_lo, vscale);
vx_hi = _mm_mul_ps(vx_hi, vscale);
vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);
const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);
__m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packs_epi16(vy, vy);
vy = _mm_max_epi8(vy, voutput_min);
if (batch & (4 * sizeof(float))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vy, 0));
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 2,885 | 30.714286 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-wasm-fmagic-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__wasm_fmagic_x1(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_zero_point = params->scalar_fmagic.magic_bias_less_zero_point;
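  // Magic-bias rounding: after scaling and clamping, adding the large magic bias fixes
  // the float's exponent so the rounded integer ends up in the low mantissa bits;
  // reinterpreting those bits as int32 and subtracting magic_bias_less_zero_point strips
  // the bias pattern and re-centers the result on the output zero point.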
do {
float vx = *input++;
vx *= vscale;
vx = __builtin_wasm_max_f32(vx, voutput_min_less_zero_point);
vx = __builtin_wasm_min_f32(vx, voutput_max_less_zero_point);
vx += vmagic_bias;
int32_t vy = (int32_t) float_as_uint32(vx);
vy -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vy;
batch -= sizeof(float);
} while (batch != 0);
}
| 1,464 | 28.897959 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-wasm-fmagic-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__wasm_fmagic_x2(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_zero_point = params->scalar_fmagic.magic_bias_less_zero_point;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
input += 2;
vx0 *= vscale;
vx1 *= vscale;
vx0 = __builtin_wasm_max_f32(vx0, voutput_min_less_zero_point);
vx1 = __builtin_wasm_max_f32(vx1, voutput_min_less_zero_point);
vx0 = __builtin_wasm_min_f32(vx0, voutput_max_less_zero_point);
vx1 = __builtin_wasm_min_f32(vx1, voutput_max_less_zero_point);
vx0 += vmagic_bias;
vx1 += vmagic_bias;
int32_t vy0 = (int32_t) float_as_uint32(vx0);
int32_t vy1 = (int32_t) float_as_uint32(vx1);
vy0 -= vmagic_bias_less_zero_point;
vy1 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vy0;
output[1] = (int8_t) vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
float vx = *input;
vx *= vscale;
vx = __builtin_wasm_max_f32(vx, voutput_min_less_zero_point);
vx = __builtin_wasm_min_f32(vx, voutput_max_less_zero_point);
vx += vmagic_bias;
int32_t vy = (int32_t) float_as_uint32(vx);
vy -= vmagic_bias_less_zero_point;
*output = (int8_t) vy;
}
}
| 2,197 | 28.702703 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-wasm-fmagic-x3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__wasm_fmagic_x3(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_zero_point = params->scalar_fmagic.magic_bias_less_zero_point;
for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
input += 3;
vx0 *= vscale;
vx1 *= vscale;
vx2 *= vscale;
vx0 = __builtin_wasm_max_f32(vx0, voutput_min_less_zero_point);
vx1 = __builtin_wasm_max_f32(vx1, voutput_min_less_zero_point);
vx2 = __builtin_wasm_max_f32(vx2, voutput_min_less_zero_point);
vx0 = __builtin_wasm_min_f32(vx0, voutput_max_less_zero_point);
vx1 = __builtin_wasm_min_f32(vx1, voutput_max_less_zero_point);
vx2 = __builtin_wasm_min_f32(vx2, voutput_max_less_zero_point);
vx0 += vmagic_bias;
vx1 += vmagic_bias;
vx2 += vmagic_bias;
int32_t vy0 = (int32_t) float_as_uint32(vx0);
int32_t vy1 = (int32_t) float_as_uint32(vx1);
int32_t vy2 = (int32_t) float_as_uint32(vx2);
vy0 -= vmagic_bias_less_zero_point;
vy1 -= vmagic_bias_less_zero_point;
vy2 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vy0;
output[1] = (int8_t) vy1;
output[2] = (int8_t) vy2;
output += 3;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
vx *= vscale;
vx = __builtin_wasm_max_f32(vx, voutput_min_less_zero_point);
vx = __builtin_wasm_min_f32(vx, voutput_max_less_zero_point);
vx += vmagic_bias;
int32_t vy = (int32_t) float_as_uint32(vx);
vy -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,608 | 29.337209 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-wasm-fmagic-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/scalar-fmagic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__wasm_fmagic_x4(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vscale = params->scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_zero_point = params->scalar_fmagic.magic_bias_less_zero_point;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
input += 4;
vx0 *= vscale;
vx1 *= vscale;
vx2 *= vscale;
vx3 *= vscale;
vx0 = __builtin_wasm_max_f32(vx0, voutput_min_less_zero_point);
vx1 = __builtin_wasm_max_f32(vx1, voutput_min_less_zero_point);
vx2 = __builtin_wasm_max_f32(vx2, voutput_min_less_zero_point);
vx3 = __builtin_wasm_max_f32(vx3, voutput_min_less_zero_point);
vx0 = __builtin_wasm_min_f32(vx0, voutput_max_less_zero_point);
vx1 = __builtin_wasm_min_f32(vx1, voutput_max_less_zero_point);
vx2 = __builtin_wasm_min_f32(vx2, voutput_max_less_zero_point);
vx3 = __builtin_wasm_min_f32(vx3, voutput_max_less_zero_point);
vx0 += vmagic_bias;
vx1 += vmagic_bias;
vx2 += vmagic_bias;
vx3 += vmagic_bias;
int32_t vy0 = (int32_t) float_as_uint32(vx0);
int32_t vy1 = (int32_t) float_as_uint32(vx1);
int32_t vy2 = (int32_t) float_as_uint32(vx2);
int32_t vy3 = (int32_t) float_as_uint32(vx3);
vy0 -= vmagic_bias_less_zero_point;
vy1 -= vmagic_bias_less_zero_point;
vy2 -= vmagic_bias_less_zero_point;
vy3 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vy0;
output[1] = (int8_t) vy1;
output[2] = (int8_t) vy2;
output[3] = (int8_t) vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
vx *= vscale;
vx = __builtin_wasm_max_f32(vx, voutput_min_less_zero_point);
vx = __builtin_wasm_min_f32(vx, voutput_max_less_zero_point);
vx += vmagic_bias;
int32_t vy = (int32_t) float_as_uint32(vx);
vy -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,933 | 30.212766 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-wasmsimd-cvt-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-cvt.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__wasmsimd_cvt_x16(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_cvt.scale);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_cvt.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd_cvt.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_cvt.output_max);
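  // Pipeline: scale, round to nearest with f32x4.nearest, saturating truncate to int32,
  // narrow to int16, add the zero point with saturation, narrow to int8, then clamp
  // against output_min/output_max.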
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
vx0123 = wasm_f32x4_mul(vx0123, vscale);
vx4567 = wasm_f32x4_mul(vx4567, vscale);
vx89AB = wasm_f32x4_mul(vx89AB, vscale);
vxCDEF = wasm_f32x4_mul(vxCDEF, vscale);
vx0123 = wasm_f32x4_nearest(vx0123);
vx4567 = wasm_f32x4_nearest(vx4567);
vx89AB = wasm_f32x4_nearest(vx89AB);
vxCDEF = wasm_f32x4_nearest(vxCDEF);
v128_t vacc0123 = wasm_i32x4_trunc_sat_f32x4(vx0123);
v128_t vacc4567 = wasm_i32x4_trunc_sat_f32x4(vx4567);
v128_t vacc89AB = wasm_i32x4_trunc_sat_f32x4(vx89AB);
v128_t vaccCDEF = wasm_i32x4_trunc_sat_f32x4(vxCDEF);
v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
vacc01234567 = wasm_i16x8_add_sat(vacc01234567, voutput_zero_point);
vacc89ABCDEF = wasm_i16x8_add_sat(vacc89ABCDEF, voutput_zero_point);
v128_t vy0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
vy0123456789ABCDEF = wasm_i8x16_max(vy0123456789ABCDEF, voutput_min);
vy0123456789ABCDEF = wasm_i8x16_min(vy0123456789ABCDEF, voutput_max);
wasm_v128_store(output, vy0123456789ABCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx_lo = wasm_v128_load(input);
v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_nearest(vx_lo);
vx_hi = wasm_f32x4_nearest(vx_hi);
v128_t vacc_lo = wasm_i32x4_trunc_sat_f32x4(vx_lo);
v128_t vacc_hi = wasm_i32x4_trunc_sat_f32x4(vx_hi);
v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_max(vy, voutput_min);
vy = wasm_i8x16_min(vy, voutput_max);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
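    // Load two (possibly aliasing) float quads for the 1-7 leftover elements (the kernel
    // is annotated XNN_OOB_READS, so reading past the tail is allowed), convert them as
    // above, and store only the valid lanes selected by the batch bits.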
v128_t vx_lo = wasm_v128_load(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
v128_t vx_hi = wasm_v128_load(x_hi);
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_nearest(vx_lo);
vx_hi = wasm_f32x4_nearest(vx_hi);
v128_t vacc_lo = wasm_i32x4_trunc_sat_f32x4(vx_lo);
v128_t vacc_hi = wasm_i32x4_trunc_sat_f32x4(vx_hi);
v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_max(vy, voutput_min);
vy = wasm_i8x16_min(vy, voutput_max);
if (batch & (4 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 4,515 | 32.954887 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-wasmsimd-cvt-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-cvt.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__wasmsimd_cvt_x24(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_cvt.scale);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_cvt.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd_cvt.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_cvt.output_max);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
vx0123 = wasm_f32x4_mul(vx0123, vscale);
vx4567 = wasm_f32x4_mul(vx4567, vscale);
vx89AB = wasm_f32x4_mul(vx89AB, vscale);
vxCDEF = wasm_f32x4_mul(vxCDEF, vscale);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vscale);
vxKLMN = wasm_f32x4_mul(vxKLMN, vscale);
vx0123 = wasm_f32x4_nearest(vx0123);
vx4567 = wasm_f32x4_nearest(vx4567);
vx89AB = wasm_f32x4_nearest(vx89AB);
vxCDEF = wasm_f32x4_nearest(vxCDEF);
vxGHIJ = wasm_f32x4_nearest(vxGHIJ);
vxKLMN = wasm_f32x4_nearest(vxKLMN);
v128_t vacc0123 = wasm_i32x4_trunc_sat_f32x4(vx0123);
v128_t vacc4567 = wasm_i32x4_trunc_sat_f32x4(vx4567);
v128_t vacc89AB = wasm_i32x4_trunc_sat_f32x4(vx89AB);
v128_t vaccCDEF = wasm_i32x4_trunc_sat_f32x4(vxCDEF);
v128_t vaccGHIJ = wasm_i32x4_trunc_sat_f32x4(vxGHIJ);
v128_t vaccKLMN = wasm_i32x4_trunc_sat_f32x4(vxKLMN);
v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t vaccGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
vacc01234567 = wasm_i16x8_add_sat(vacc01234567, voutput_zero_point);
vacc89ABCDEF = wasm_i16x8_add_sat(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = wasm_i16x8_add_sat(vaccGHIJKLMN, voutput_zero_point);
v128_t vy0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
v128_t vyGHIJKLMN = wasm_i8x16_narrow_i16x8(vaccGHIJKLMN, vaccGHIJKLMN);
vy0123456789ABCDEF = wasm_i8x16_max(vy0123456789ABCDEF, voutput_min);
vyGHIJKLMN = wasm_i8x16_max(vyGHIJKLMN, voutput_min);
vy0123456789ABCDEF = wasm_i8x16_min(vy0123456789ABCDEF, voutput_max);
vyGHIJKLMN = wasm_i8x16_min(vyGHIJKLMN, voutput_max);
wasm_v128_store(output, vy0123456789ABCDEF);
wasm_v128_store64_lane(output + 16, vyGHIJKLMN, 0);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx_lo = wasm_v128_load(input);
v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_nearest(vx_lo);
vx_hi = wasm_f32x4_nearest(vx_hi);
v128_t vacc_lo = wasm_i32x4_trunc_sat_f32x4(vx_lo);
v128_t vacc_hi = wasm_i32x4_trunc_sat_f32x4(vx_hi);
v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_max(vy, voutput_min);
vy = wasm_i8x16_min(vy, voutput_max);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
v128_t vx_lo = wasm_v128_load(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
v128_t vx_hi = wasm_v128_load(x_hi);
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_nearest(vx_lo);
vx_hi = wasm_f32x4_nearest(vx_hi);
v128_t vacc_lo = wasm_i32x4_trunc_sat_f32x4(vx_lo);
v128_t vacc_hi = wasm_i32x4_trunc_sat_f32x4(vx_hi);
v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_max(vy, voutput_min);
vy = wasm_i8x16_min(vy, voutput_max);
if (batch & (4 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 5,292 | 35.006803 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-wasmsimd-cvt-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-cvt.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__wasmsimd_cvt_x32(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_cvt.scale);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_cvt.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd_cvt.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_cvt.output_max);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
v128_t vxKLMN = wasm_v128_load(input + 20);
v128_t vxOPQR = wasm_v128_load(input + 24);
v128_t vxSTUV = wasm_v128_load(input + 28);
input += 32;
vx0123 = wasm_f32x4_mul(vx0123, vscale);
vx4567 = wasm_f32x4_mul(vx4567, vscale);
vx89AB = wasm_f32x4_mul(vx89AB, vscale);
vxCDEF = wasm_f32x4_mul(vxCDEF, vscale);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vscale);
vxKLMN = wasm_f32x4_mul(vxKLMN, vscale);
vxOPQR = wasm_f32x4_mul(vxOPQR, vscale);
vxSTUV = wasm_f32x4_mul(vxSTUV, vscale);
vx0123 = wasm_f32x4_nearest(vx0123);
vx4567 = wasm_f32x4_nearest(vx4567);
vx89AB = wasm_f32x4_nearest(vx89AB);
vxCDEF = wasm_f32x4_nearest(vxCDEF);
vxGHIJ = wasm_f32x4_nearest(vxGHIJ);
vxKLMN = wasm_f32x4_nearest(vxKLMN);
vxOPQR = wasm_f32x4_nearest(vxOPQR);
vxSTUV = wasm_f32x4_nearest(vxSTUV);
v128_t vacc0123 = wasm_i32x4_trunc_sat_f32x4(vx0123);
v128_t vacc4567 = wasm_i32x4_trunc_sat_f32x4(vx4567);
v128_t vacc89AB = wasm_i32x4_trunc_sat_f32x4(vx89AB);
v128_t vaccCDEF = wasm_i32x4_trunc_sat_f32x4(vxCDEF);
v128_t vaccGHIJ = wasm_i32x4_trunc_sat_f32x4(vxGHIJ);
v128_t vaccKLMN = wasm_i32x4_trunc_sat_f32x4(vxKLMN);
v128_t vaccOPQR = wasm_i32x4_trunc_sat_f32x4(vxOPQR);
v128_t vaccSTUV = wasm_i32x4_trunc_sat_f32x4(vxSTUV);
v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t vaccGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
v128_t vaccOPQRSTUV = wasm_i16x8_narrow_i32x4(vaccOPQR, vaccSTUV);
vacc01234567 = wasm_i16x8_add_sat(vacc01234567, voutput_zero_point);
vacc89ABCDEF = wasm_i16x8_add_sat(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = wasm_i16x8_add_sat(vaccGHIJKLMN, voutput_zero_point);
vaccOPQRSTUV = wasm_i16x8_add_sat(vaccOPQRSTUV, voutput_zero_point);
v128_t vy0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
v128_t vyGHIJKLMNOPQRSTUV = wasm_i8x16_narrow_i16x8(vaccGHIJKLMN, vaccOPQRSTUV);
vy0123456789ABCDEF = wasm_i8x16_max(vy0123456789ABCDEF, voutput_min);
vyGHIJKLMNOPQRSTUV = wasm_i8x16_max(vyGHIJKLMNOPQRSTUV, voutput_min);
vy0123456789ABCDEF = wasm_i8x16_min(vy0123456789ABCDEF, voutput_max);
vyGHIJKLMNOPQRSTUV = wasm_i8x16_min(vyGHIJKLMNOPQRSTUV, voutput_max);
wasm_v128_store(output, vy0123456789ABCDEF);
wasm_v128_store(output + 16, vyGHIJKLMNOPQRSTUV);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx_lo = wasm_v128_load(input);
v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_nearest(vx_lo);
vx_hi = wasm_f32x4_nearest(vx_hi);
v128_t vacc_lo = wasm_i32x4_trunc_sat_f32x4(vx_lo);
v128_t vacc_hi = wasm_i32x4_trunc_sat_f32x4(vx_hi);
v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_max(vy, voutput_min);
vy = wasm_i8x16_min(vy, voutput_max);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
v128_t vx_lo = wasm_v128_load(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
v128_t vx_hi = wasm_v128_load(x_hi);
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_nearest(vx_lo);
vx_hi = wasm_f32x4_nearest(vx_hi);
v128_t vacc_lo = wasm_i32x4_trunc_sat_f32x4(vx_lo);
v128_t vacc_hi = wasm_i32x4_trunc_sat_f32x4(vx_hi);
v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_max(vy, voutput_min);
vy = wasm_i8x16_min(vy, voutput_max);
if (batch & (4 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 5,858 | 36.318471 | 99 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-wasmsimd-cvt-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-cvt.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__wasmsimd_cvt_x8(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_cvt.scale);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_cvt.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd_cvt.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_cvt.output_max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx_lo = wasm_v128_load(input);
v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_nearest(vx_lo);
vx_hi = wasm_f32x4_nearest(vx_hi);
v128_t vacc_lo = wasm_i32x4_trunc_sat_f32x4(vx_lo);
v128_t vacc_hi = wasm_i32x4_trunc_sat_f32x4(vx_hi);
v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_max(vy, voutput_min);
vy = wasm_i8x16_min(vy, voutput_max);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
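    // Tail of 1-7 elements: the kernel is declared XNN_OOB_READS, so both
    // halves are fetched with full 16-byte loads. When fewer than 4 elements
    // remain, x_hi aliases input and vx_hi simply duplicates vx_lo; only as
    // many output bytes as there are remaining elements are written below,
    // via the 4-, 2- and 1-lane stores.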
v128_t vx_lo = wasm_v128_load(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
v128_t vx_hi = wasm_v128_load(x_hi);
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_nearest(vx_lo);
vx_hi = wasm_f32x4_nearest(vx_hi);
v128_t vacc_lo = wasm_i32x4_trunc_sat_f32x4(vx_lo);
v128_t vacc_hi = wasm_i32x4_trunc_sat_f32x4(vx_hi);
v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_max(vy, voutput_min);
vy = wasm_i8x16_min(vy, voutput_max);
if (batch & (4 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,066 | 30.947917 | 99 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-wasmsimd-magic-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-magic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__wasmsimd_magic_x16(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_magic.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->wasmsimd_magic.magic_min);
const v128_t vmagic_bias_less_zero_point = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias_less_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_magic.output_max);
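  // The "magic" variant avoids an explicit float-to-int conversion: after
  // scaling, adding vmagic_bias places the rounded integer in the low bits of
  // the float representation. The result is clamped from below against
  // vmagic_min (which also encodes output_min), reinterpreted as int32, and
  // vmagic_bias_less_zero_point is subtracted to recover the quantized value
  // with the zero point already applied. Only the upper clamp against
  // voutput_max remains to be done on the narrowed int8 result.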
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
vx0123 = wasm_f32x4_mul(vx0123, vscale);
vx4567 = wasm_f32x4_mul(vx4567, vscale);
vx89AB = wasm_f32x4_mul(vx89AB, vscale);
vxCDEF = wasm_f32x4_mul(vxCDEF, vscale);
vx0123 = wasm_f32x4_add(vx0123, vmagic_bias);
vx4567 = wasm_f32x4_add(vx4567, vmagic_bias);
vx89AB = wasm_f32x4_add(vx89AB, vmagic_bias);
vxCDEF = wasm_f32x4_add(vxCDEF, vmagic_bias);
v128_t vacc0123 = wasm_i32x4_max(vx0123, vmagic_min);
v128_t vacc4567 = wasm_i32x4_max(vx4567, vmagic_min);
v128_t vacc89AB = wasm_i32x4_max(vx89AB, vmagic_min);
v128_t vaccCDEF = wasm_i32x4_max(vxCDEF, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_zero_point);
const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
const v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t vy0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
vy0123456789ABCDEF = wasm_i8x16_min(vy0123456789ABCDEF, voutput_max);
wasm_v128_store(output, vy0123456789ABCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx_lo = wasm_v128_load(input);
v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_min(vy, voutput_max);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
v128_t vx_lo = wasm_v128_load(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
v128_t vx_hi = wasm_v128_load(x_hi);
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_min(vy, voutput_max);
if (batch & (4 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 4,861 | 35.014815 | 119 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-wasmsimd-magic-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-magic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__wasmsimd_magic_x24(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_magic.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->wasmsimd_magic.magic_min);
const v128_t vmagic_bias_less_zero_point = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias_less_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_magic.output_max);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
vx0123 = wasm_f32x4_mul(vx0123, vscale);
vx4567 = wasm_f32x4_mul(vx4567, vscale);
vx89AB = wasm_f32x4_mul(vx89AB, vscale);
vxCDEF = wasm_f32x4_mul(vxCDEF, vscale);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vscale);
vxKLMN = wasm_f32x4_mul(vxKLMN, vscale);
vx0123 = wasm_f32x4_add(vx0123, vmagic_bias);
vx4567 = wasm_f32x4_add(vx4567, vmagic_bias);
vx89AB = wasm_f32x4_add(vx89AB, vmagic_bias);
vxCDEF = wasm_f32x4_add(vxCDEF, vmagic_bias);
vxGHIJ = wasm_f32x4_add(vxGHIJ, vmagic_bias);
vxKLMN = wasm_f32x4_add(vxKLMN, vmagic_bias);
v128_t vacc0123 = wasm_i32x4_max(vx0123, vmagic_min);
v128_t vacc4567 = wasm_i32x4_max(vx4567, vmagic_min);
v128_t vacc89AB = wasm_i32x4_max(vx89AB, vmagic_min);
v128_t vaccCDEF = wasm_i32x4_max(vxCDEF, vmagic_min);
v128_t vaccGHIJ = wasm_i32x4_max(vxGHIJ, vmagic_min);
v128_t vaccKLMN = wasm_i32x4_max(vxKLMN, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_zero_point);
vaccGHIJ = wasm_i32x4_sub(vaccGHIJ, vmagic_bias_less_zero_point);
vaccKLMN = wasm_i32x4_sub(vaccKLMN, vmagic_bias_less_zero_point);
const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
const v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
const v128_t vaccGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
v128_t vy0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
v128_t vyGHIJKLMN = wasm_i8x16_narrow_i16x8(vaccGHIJKLMN, vaccGHIJKLMN);
vy0123456789ABCDEF = wasm_i8x16_min(vy0123456789ABCDEF, voutput_max);
vyGHIJKLMN = wasm_i8x16_min(vyGHIJKLMN, voutput_max);
wasm_v128_store(output, vy0123456789ABCDEF);
wasm_v128_store64_lane(output + 16, vyGHIJKLMN, 0);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx_lo = wasm_v128_load(input);
v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_min(vy, voutput_max);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
v128_t vx_lo = wasm_v128_load(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
v128_t vx_hi = wasm_v128_load(x_hi);
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_min(vy, voutput_max);
if (batch & (4 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 5,671 | 37.067114 | 119 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-wasmsimd-magic-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-magic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__wasmsimd_magic_x32(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_magic.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->wasmsimd_magic.magic_min);
const v128_t vmagic_bias_less_zero_point = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias_less_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_magic.output_max);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
v128_t vxKLMN = wasm_v128_load(input + 20);
v128_t vxOPQR = wasm_v128_load(input + 24);
v128_t vxSTUV = wasm_v128_load(input + 28);
input += 32;
vx0123 = wasm_f32x4_mul(vx0123, vscale);
vx4567 = wasm_f32x4_mul(vx4567, vscale);
vx89AB = wasm_f32x4_mul(vx89AB, vscale);
vxCDEF = wasm_f32x4_mul(vxCDEF, vscale);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vscale);
vxKLMN = wasm_f32x4_mul(vxKLMN, vscale);
vxOPQR = wasm_f32x4_mul(vxOPQR, vscale);
vxSTUV = wasm_f32x4_mul(vxSTUV, vscale);
vx0123 = wasm_f32x4_add(vx0123, vmagic_bias);
vx4567 = wasm_f32x4_add(vx4567, vmagic_bias);
vx89AB = wasm_f32x4_add(vx89AB, vmagic_bias);
vxCDEF = wasm_f32x4_add(vxCDEF, vmagic_bias);
vxGHIJ = wasm_f32x4_add(vxGHIJ, vmagic_bias);
vxKLMN = wasm_f32x4_add(vxKLMN, vmagic_bias);
vxOPQR = wasm_f32x4_add(vxOPQR, vmagic_bias);
vxSTUV = wasm_f32x4_add(vxSTUV, vmagic_bias);
v128_t vacc0123 = wasm_i32x4_max(vx0123, vmagic_min);
v128_t vacc4567 = wasm_i32x4_max(vx4567, vmagic_min);
v128_t vacc89AB = wasm_i32x4_max(vx89AB, vmagic_min);
v128_t vaccCDEF = wasm_i32x4_max(vxCDEF, vmagic_min);
v128_t vaccGHIJ = wasm_i32x4_max(vxGHIJ, vmagic_min);
v128_t vaccKLMN = wasm_i32x4_max(vxKLMN, vmagic_min);
v128_t vaccOPQR = wasm_i32x4_max(vxOPQR, vmagic_min);
v128_t vaccSTUV = wasm_i32x4_max(vxSTUV, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_zero_point);
vaccGHIJ = wasm_i32x4_sub(vaccGHIJ, vmagic_bias_less_zero_point);
vaccKLMN = wasm_i32x4_sub(vaccKLMN, vmagic_bias_less_zero_point);
vaccOPQR = wasm_i32x4_sub(vaccOPQR, vmagic_bias_less_zero_point);
vaccSTUV = wasm_i32x4_sub(vaccSTUV, vmagic_bias_less_zero_point);
const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
const v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
const v128_t vaccGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
const v128_t vaccOPQRSTUV = wasm_i16x8_narrow_i32x4(vaccOPQR, vaccSTUV);
v128_t vy0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
v128_t vyGHIJKLMNOPQRSTUV = wasm_i8x16_narrow_i16x8(vaccGHIJKLMN, vaccOPQRSTUV);
vy0123456789ABCDEF = wasm_i8x16_min(vy0123456789ABCDEF, voutput_max);
vyGHIJKLMNOPQRSTUV = wasm_i8x16_min(vyGHIJKLMNOPQRSTUV, voutput_max);
wasm_v128_store(output, vy0123456789ABCDEF);
wasm_v128_store(output + 16, vyGHIJKLMNOPQRSTUV);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx_lo = wasm_v128_load(input);
v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_min(vy, voutput_max);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
v128_t vx_lo = wasm_v128_load(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
v128_t vx_hi = wasm_v128_load(x_hi);
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_min(vy, voutput_max);
if (batch & (4 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 6,312 | 38.45625 | 119 | c |
| XNNPACK | XNNPACK-master/src/f32-qs8-vcvt/gen/f32-qs8-vcvt-wasmsimd-magic-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/wasmsimd-magic.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qs8_vcvt_ukernel__wasmsimd_magic_x8(
size_t batch,
const float* input,
int8_t* output,
const union xnn_f32_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_magic.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->wasmsimd_magic.magic_min);
const v128_t vmagic_bias_less_zero_point = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias_less_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_magic.output_max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx_lo = wasm_v128_load(input);
v128_t vx_hi = wasm_v128_load(input + 4);
input += 8;
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_min(vy, voutput_max);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
v128_t vx_lo = wasm_v128_load(input);
const float* x_hi = (const float*) ((uintptr_t) input + (batch & (4 * sizeof(float))));
v128_t vx_hi = wasm_v128_load(x_hi);
vx_lo = wasm_f32x4_mul(vx_lo, vscale);
vx_hi = wasm_f32x4_mul(vx_hi, vscale);
vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
vy = wasm_i8x16_min(vy, voutput_max);
if (batch & (4 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(float))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,305 | 32.734694 | 119 | c |
| XNNPACK | XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__avx_x16(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx.output_min);
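  // AVX pipeline: scale, clamp from above in float against
  // (output_max - zero_point) -- the later unsigned pack only saturates at
  // 255 -- then convert with rounding via _mm256_cvtps_epi32, pack to int16
  // with signed saturation, add the zero point with a saturating 16-bit add,
  // pack to uint8 with unsigned saturation, and apply the lower clamp with
  // _mm_max_epu8.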
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m256 vx01234567 = _mm256_loadu_ps(input);
__m256 vx89ABCDEF = _mm256_loadu_ps(input + 8);
input += 16;
vx01234567 = _mm256_mul_ps(vx01234567, vscale);
vx89ABCDEF = _mm256_mul_ps(vx89ABCDEF, vscale);
vx01234567 = _mm256_min_ps(vx01234567, voutput_max_less_zero_point);
vx89ABCDEF = _mm256_min_ps(vx89ABCDEF, voutput_max_less_zero_point);
const __m256i vacc01234567 = _mm256_cvtps_epi32(vx01234567);
const __m256i vacc89ABCDEF = _mm256_cvtps_epi32(vx89ABCDEF);
__m128i vy01234567 = _mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extractf128_si256(vacc01234567, 1));
__m128i vy89ABCDEF = _mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extractf128_si256(vacc89ABCDEF, 1));
vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);
__m128i vy0123456789ABCDEF = _mm_packus_epi16(vy01234567, vy89ABCDEF);
vy0123456789ABCDEF = _mm_max_epu8(vy0123456789ABCDEF, voutput_min);
_mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, voutput_min);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
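    // Tail of 1-7 elements: build a per-lane load mask by indexing backwards
    // into mask_table (all-ones entries followed by zeros), so exactly
    // batch / sizeof(float) lanes are active for _mm256_maskload_ps. The
    // pointer arithmetic is in bytes, matching the 4-byte table entries.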
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, voutput_min);
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 3,810 | 33.963303 | 122 | c |
| XNNPACK | XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__avx_x24(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx.output_min);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
__m256 vx01234567 = _mm256_loadu_ps(input);
__m256 vx89ABCDEF = _mm256_loadu_ps(input + 8);
__m256 vxGHIJKLMN = _mm256_loadu_ps(input + 16);
input += 24;
vx01234567 = _mm256_mul_ps(vx01234567, vscale);
vx89ABCDEF = _mm256_mul_ps(vx89ABCDEF, vscale);
vxGHIJKLMN = _mm256_mul_ps(vxGHIJKLMN, vscale);
vx01234567 = _mm256_min_ps(vx01234567, voutput_max_less_zero_point);
vx89ABCDEF = _mm256_min_ps(vx89ABCDEF, voutput_max_less_zero_point);
vxGHIJKLMN = _mm256_min_ps(vxGHIJKLMN, voutput_max_less_zero_point);
const __m256i vacc01234567 = _mm256_cvtps_epi32(vx01234567);
const __m256i vacc89ABCDEF = _mm256_cvtps_epi32(vx89ABCDEF);
const __m256i vaccGHIJKLMN = _mm256_cvtps_epi32(vxGHIJKLMN);
__m128i vy01234567 = _mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extractf128_si256(vacc01234567, 1));
__m128i vy89ABCDEF = _mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extractf128_si256(vacc89ABCDEF, 1));
__m128i vyGHIJKLMN = _mm_packs_epi32(_mm256_castsi256_si128(vaccGHIJKLMN), _mm256_extractf128_si256(vaccGHIJKLMN, 1));
vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);
vyGHIJKLMN = _mm_adds_epi16(vyGHIJKLMN, voutput_zero_point);
__m128i vy0123456789ABCDEF = _mm_packus_epi16(vy01234567, vy89ABCDEF);
vyGHIJKLMN = _mm_packus_epi16(vyGHIJKLMN, vyGHIJKLMN);
vy0123456789ABCDEF = _mm_max_epu8(vy0123456789ABCDEF, voutput_min);
vyGHIJKLMN = _mm_max_epu8(vyGHIJKLMN, voutput_min);
_mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), vyGHIJKLMN);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, voutput_min);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, voutput_min);
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 4,416 | 36.432203 | 122 | c |
| XNNPACK | XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__avx_x32(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx.output_min);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m256 vx01234567 = _mm256_loadu_ps(input);
__m256 vx89ABCDEF = _mm256_loadu_ps(input + 8);
__m256 vxGHIJKLMN = _mm256_loadu_ps(input + 16);
__m256 vxOPQRSTUV = _mm256_loadu_ps(input + 24);
input += 32;
vx01234567 = _mm256_mul_ps(vx01234567, vscale);
vx89ABCDEF = _mm256_mul_ps(vx89ABCDEF, vscale);
vxGHIJKLMN = _mm256_mul_ps(vxGHIJKLMN, vscale);
vxOPQRSTUV = _mm256_mul_ps(vxOPQRSTUV, vscale);
vx01234567 = _mm256_min_ps(vx01234567, voutput_max_less_zero_point);
vx89ABCDEF = _mm256_min_ps(vx89ABCDEF, voutput_max_less_zero_point);
vxGHIJKLMN = _mm256_min_ps(vxGHIJKLMN, voutput_max_less_zero_point);
vxOPQRSTUV = _mm256_min_ps(vxOPQRSTUV, voutput_max_less_zero_point);
const __m256i vacc01234567 = _mm256_cvtps_epi32(vx01234567);
const __m256i vacc89ABCDEF = _mm256_cvtps_epi32(vx89ABCDEF);
const __m256i vaccGHIJKLMN = _mm256_cvtps_epi32(vxGHIJKLMN);
const __m256i vaccOPQRSTUV = _mm256_cvtps_epi32(vxOPQRSTUV);
__m128i vy01234567 = _mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extractf128_si256(vacc01234567, 1));
__m128i vy89ABCDEF = _mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extractf128_si256(vacc89ABCDEF, 1));
__m128i vyGHIJKLMN = _mm_packs_epi32(_mm256_castsi256_si128(vaccGHIJKLMN), _mm256_extractf128_si256(vaccGHIJKLMN, 1));
__m128i vyOPQRSTUV = _mm_packs_epi32(_mm256_castsi256_si128(vaccOPQRSTUV), _mm256_extractf128_si256(vaccOPQRSTUV, 1));
vy01234567 = _mm_adds_epi16(vy01234567, voutput_zero_point);
vy89ABCDEF = _mm_adds_epi16(vy89ABCDEF, voutput_zero_point);
vyGHIJKLMN = _mm_adds_epi16(vyGHIJKLMN, voutput_zero_point);
vyOPQRSTUV = _mm_adds_epi16(vyOPQRSTUV, voutput_zero_point);
__m128i vy0123456789ABCDEF = _mm_packus_epi16(vy01234567, vy89ABCDEF);
__m128i vyGHIJKLMNOPQRSTUV = _mm_packus_epi16(vyGHIJKLMN, vyOPQRSTUV);
vy0123456789ABCDEF = _mm_max_epu8(vy0123456789ABCDEF, voutput_min);
vyGHIJKLMNOPQRSTUV = _mm_max_epu8(vyGHIJKLMNOPQRSTUV, voutput_min);
_mm_storeu_si128((__m128i*) output, vy0123456789ABCDEF);
_mm_storeu_si128((__m128i*) (output + 16), vyGHIJKLMNOPQRSTUV);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, voutput_min);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, voutput_min);
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 4,887 | 38.419355 | 122 | c |
| XNNPACK | XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__avx_x8(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx.output_min);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, voutput_min);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, voutput_zero_point);
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, voutput_min);
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 2,629 | 31.073171 | 112 | c |
| XNNPACK | XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx2-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__avx2_x16(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx2.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx2.output_max_less_zero_point);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
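  // With AVX2, _mm256_packs_epi32 packs within each 128-bit lane, so the
  // packed vector holds the four-element groups in 0,2,1,3 order; the final
  // _mm_shuffle_epi32(..., _MM_SHUFFLE(3, 1, 2, 0)) restores the original
  // element order before the 16 bytes are stored.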
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m256 vx01 = _mm256_loadu_ps(input);
__m256 vx23 = _mm256_loadu_ps(input + 8);
input += 16;
vx01 = _mm256_mul_ps(vx01, vscale);
vx23 = _mm256_mul_ps(vx23, vscale);
vx01 = _mm256_min_ps(vx01, voutput_max_less_zero_point);
vx23 = _mm256_min_ps(vx23, voutput_max_less_zero_point);
const __m256i vacc01 = _mm256_cvtps_epi32(vx01);
const __m256i vacc23 = _mm256_cvtps_epi32(vx23);
__m256i vacc0213 = _mm256_packs_epi32(vacc01, vacc23);
vacc0213 = _mm256_adds_epi16(vacc0213, voutput_zero_point);
const __m128i vy0213 = _mm_packus_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epu8(vy0123, voutput_min);
_mm_storeu_si128((__m128i*) output, vy0123);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, voutput_min);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, voutput_min);
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 3,612 | 32.146789 | 117 | c |
| XNNPACK | XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx2-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__avx2_x32(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx2.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx2.output_max_less_zero_point);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
const __m256i vshuffle_mask = _mm256_load_si256((const __m256i*) params->avx2.shuffle_mask);
const __m256i voutput_min = _mm256_load_si256((const __m256i*) params->avx2.output_min);
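  // Same lane-wise packing as the 16-element variant, but with a full 256-bit
  // result per iteration the order fix-up is a cross-lane permute
  // (_mm256_permutevar8x32_epi32 with vshuffle_mask) instead of a 128-bit
  // dword shuffle.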
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m256 vx01 = _mm256_loadu_ps(input);
__m256 vx23 = _mm256_loadu_ps(input + 8);
__m256 vx45 = _mm256_loadu_ps(input + 16);
__m256 vx67 = _mm256_loadu_ps(input + 24);
input += 32;
vx01 = _mm256_mul_ps(vx01, vscale);
vx23 = _mm256_mul_ps(vx23, vscale);
vx45 = _mm256_mul_ps(vx45, vscale);
vx67 = _mm256_mul_ps(vx67, vscale);
vx01 = _mm256_min_ps(vx01, voutput_max_less_zero_point);
vx23 = _mm256_min_ps(vx23, voutput_max_less_zero_point);
vx45 = _mm256_min_ps(vx45, voutput_max_less_zero_point);
vx67 = _mm256_min_ps(vx67, voutput_max_less_zero_point);
const __m256i vacc01 = _mm256_cvtps_epi32(vx01);
const __m256i vacc23 = _mm256_cvtps_epi32(vx23);
const __m256i vacc45 = _mm256_cvtps_epi32(vx45);
const __m256i vacc67 = _mm256_cvtps_epi32(vx67);
__m256i vacc0213 = _mm256_packs_epi32(vacc01, vacc23);
__m256i vacc4657 = _mm256_packs_epi32(vacc45, vacc67);
vacc0213 = _mm256_adds_epi16(vacc0213, voutput_zero_point);
vacc4657 = _mm256_adds_epi16(vacc4657, voutput_zero_point);
const __m256i vy02461357 = _mm256_packus_epi16(vacc0213, vacc4657);
__m256i vy01234567 = _mm256_permutevar8x32_epi32(vy02461357, vshuffle_mask);
vy01234567 = _mm256_max_epu8(vy01234567, voutput_min);
_mm256_storeu_si256((__m256i*) output, vy01234567);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, _mm256_castsi256_si128(voutput_min));
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, _mm256_castsi256_si128(voutput_min));
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 4,263 | 34.533333 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx2-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__avx2_x48(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx2.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx2.output_max_less_zero_point);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
const __m256i vshuffle_mask = _mm256_load_si256((const __m256i*) params->avx2.shuffle_mask);
const __m256i voutput_min = _mm256_load_si256((const __m256i*) params->avx2.output_min);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
__m256 vx01 = _mm256_loadu_ps(input);
__m256 vx23 = _mm256_loadu_ps(input + 8);
__m256 vx45 = _mm256_loadu_ps(input + 16);
__m256 vx67 = _mm256_loadu_ps(input + 24);
__m256 vx89 = _mm256_loadu_ps(input + 32);
__m256 vxAB = _mm256_loadu_ps(input + 40);
input += 48;
vx01 = _mm256_mul_ps(vx01, vscale);
vx23 = _mm256_mul_ps(vx23, vscale);
vx45 = _mm256_mul_ps(vx45, vscale);
vx67 = _mm256_mul_ps(vx67, vscale);
vx89 = _mm256_mul_ps(vx89, vscale);
vxAB = _mm256_mul_ps(vxAB, vscale);
vx01 = _mm256_min_ps(vx01, voutput_max_less_zero_point);
vx23 = _mm256_min_ps(vx23, voutput_max_less_zero_point);
vx45 = _mm256_min_ps(vx45, voutput_max_less_zero_point);
vx67 = _mm256_min_ps(vx67, voutput_max_less_zero_point);
vx89 = _mm256_min_ps(vx89, voutput_max_less_zero_point);
vxAB = _mm256_min_ps(vxAB, voutput_max_less_zero_point);
const __m256i vacc01 = _mm256_cvtps_epi32(vx01);
const __m256i vacc23 = _mm256_cvtps_epi32(vx23);
const __m256i vacc45 = _mm256_cvtps_epi32(vx45);
const __m256i vacc67 = _mm256_cvtps_epi32(vx67);
const __m256i vacc89 = _mm256_cvtps_epi32(vx89);
const __m256i vaccAB = _mm256_cvtps_epi32(vxAB);
__m256i vacc0213 = _mm256_packs_epi32(vacc01, vacc23);
__m256i vacc4657 = _mm256_packs_epi32(vacc45, vacc67);
__m256i vacc8A9B = _mm256_packs_epi32(vacc89, vaccAB);
vacc0213 = _mm256_adds_epi16(vacc0213, voutput_zero_point);
vacc4657 = _mm256_adds_epi16(vacc4657, voutput_zero_point);
vacc8A9B = _mm256_adds_epi16(vacc8A9B, voutput_zero_point);
const __m256i vy02461357 = _mm256_packus_epi16(vacc0213, vacc4657);
const __m128i vy8A9B = _mm_packus_epi16(_mm256_castsi256_si128(vacc8A9B), _mm256_extracti128_si256(vacc8A9B, 1));
__m256i vy01234567 = _mm256_permutevar8x32_epi32(vy02461357, vshuffle_mask);
__m128i vy89AB = _mm_shuffle_epi32(vy8A9B, _MM_SHUFFLE(3, 1, 2, 0));
vy01234567 = _mm256_max_epu8(vy01234567, voutput_min);
vy89AB = _mm_max_epu8(vy89AB, _mm256_castsi256_si128(voutput_min));
_mm256_storeu_si256((__m256i*) output, vy01234567);
_mm_storeu_si128((__m128i*) (output + 32), vy89AB);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, _mm256_castsi256_si128(voutput_min));
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, _mm256_castsi256_si128(voutput_min));
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 5,107 | 37.119403 | 117 | c |
| XNNPACK | XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx2-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__avx2_x64(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vscale = _mm256_load_ps(params->avx2.scale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx2.output_max_less_zero_point);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
const __m256i vshuffle_mask = _mm256_load_si256((const __m256i*) params->avx2.shuffle_mask);
const __m256i voutput_min = _mm256_load_si256((const __m256i*) params->avx2.output_min);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
__m256 vx01 = _mm256_loadu_ps(input);
__m256 vx23 = _mm256_loadu_ps(input + 8);
__m256 vx45 = _mm256_loadu_ps(input + 16);
__m256 vx67 = _mm256_loadu_ps(input + 24);
__m256 vx89 = _mm256_loadu_ps(input + 32);
__m256 vxAB = _mm256_loadu_ps(input + 40);
__m256 vxCD = _mm256_loadu_ps(input + 48);
__m256 vxEF = _mm256_loadu_ps(input + 56);
input += 64;
vx01 = _mm256_mul_ps(vx01, vscale);
vx23 = _mm256_mul_ps(vx23, vscale);
vx45 = _mm256_mul_ps(vx45, vscale);
vx67 = _mm256_mul_ps(vx67, vscale);
vx89 = _mm256_mul_ps(vx89, vscale);
vxAB = _mm256_mul_ps(vxAB, vscale);
vxCD = _mm256_mul_ps(vxCD, vscale);
vxEF = _mm256_mul_ps(vxEF, vscale);
vx01 = _mm256_min_ps(vx01, voutput_max_less_zero_point);
vx23 = _mm256_min_ps(vx23, voutput_max_less_zero_point);
vx45 = _mm256_min_ps(vx45, voutput_max_less_zero_point);
vx67 = _mm256_min_ps(vx67, voutput_max_less_zero_point);
vx89 = _mm256_min_ps(vx89, voutput_max_less_zero_point);
vxAB = _mm256_min_ps(vxAB, voutput_max_less_zero_point);
vxCD = _mm256_min_ps(vxCD, voutput_max_less_zero_point);
vxEF = _mm256_min_ps(vxEF, voutput_max_less_zero_point);
const __m256i vacc01 = _mm256_cvtps_epi32(vx01);
const __m256i vacc23 = _mm256_cvtps_epi32(vx23);
const __m256i vacc45 = _mm256_cvtps_epi32(vx45);
const __m256i vacc67 = _mm256_cvtps_epi32(vx67);
const __m256i vacc89 = _mm256_cvtps_epi32(vx89);
const __m256i vaccAB = _mm256_cvtps_epi32(vxAB);
const __m256i vaccCD = _mm256_cvtps_epi32(vxCD);
const __m256i vaccEF = _mm256_cvtps_epi32(vxEF);
__m256i vacc0213 = _mm256_packs_epi32(vacc01, vacc23);
__m256i vacc4657 = _mm256_packs_epi32(vacc45, vacc67);
__m256i vacc8A9B = _mm256_packs_epi32(vacc89, vaccAB);
__m256i vaccCEDF = _mm256_packs_epi32(vaccCD, vaccEF);
vacc0213 = _mm256_adds_epi16(vacc0213, voutput_zero_point);
vacc4657 = _mm256_adds_epi16(vacc4657, voutput_zero_point);
vacc8A9B = _mm256_adds_epi16(vacc8A9B, voutput_zero_point);
vaccCEDF = _mm256_adds_epi16(vaccCEDF, voutput_zero_point);
const __m256i vy02461357 = _mm256_packus_epi16(vacc0213, vacc4657);
const __m256i vy8ACE9BDF = _mm256_packus_epi16(vacc8A9B, vaccCEDF);
__m256i vy01234567 = _mm256_permutevar8x32_epi32(vy02461357, vshuffle_mask);
__m256i vy89ABCDEF = _mm256_permutevar8x32_epi32(vy8ACE9BDF, vshuffle_mask);
vy01234567 = _mm256_max_epu8(vy01234567, voutput_min);
vy89ABCDEF = _mm256_max_epu8(vy89ABCDEF, voutput_min);
_mm256_storeu_si256((__m256i*) output, vy01234567);
_mm256_storeu_si256((__m256i*) (output + 32), vy89ABCDEF);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
input += 8;
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, _mm256_castsi256_si128(voutput_min));
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
vx = _mm256_mul_ps(vx, vscale);
vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
const __m256i vacc = _mm256_cvtps_epi32(vx);
__m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
vy = _mm_packus_epi16(vy, vy);
vy = _mm_max_epu8(vy, _mm256_castsi256_si128(voutput_min));
if (batch & (4 * sizeof(float))) {
_mm_storeu_si32(output, vy);
output += 4;
vy = _mm_srli_epi64(vy, 32);
}
if (batch & (2 * sizeof(float))) {
_mm_storeu_si16(output, vy);
output += 2;
vy = _mm_srli_epi32(vy, 16);
}
if (batch & (1 * sizeof(float))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 5,588 | 37.8125 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx512skx-x128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__avx512skx_x128(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vscale = _mm512_load_ps(params->avx512.scale);
const __m512 voutput_max_less_zero_point = _mm512_load_ps(params->avx512.output_max_less_zero_point);
const __m512i voutput_zero_point = _mm512_load_si512(params->avx512.output_zero_point);
const __m512i vshuffle512_mask = _mm512_load_si512(params->avx512.shuffle512_mask);
const __m512i voutput_min = _mm512_load_si512(params->avx512.output_min);
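  // AVX-512 packs also operate per 128-bit lane, so after the saturating
  // narrows the bytes are interleaved across lanes; a single
  // _mm512_permutexvar_epi32 with vshuffle512_mask puts each 64-byte result
  // back into memory order before the store.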
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
__m512 vx4567 = _mm512_loadu_ps(input + 16);
__m512 vx89AB = _mm512_loadu_ps(input + 32);
__m512 vxCDEF = _mm512_loadu_ps(input + 48);
__m512 vxGHIJ = _mm512_loadu_ps(input + 64);
__m512 vxKLMN = _mm512_loadu_ps(input + 80);
__m512 vxOPQR = _mm512_loadu_ps(input + 96);
__m512 vxSTUV = _mm512_loadu_ps(input + 112);
input += 128;
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx4567 = _mm512_mul_ps(vx4567, vscale);
vx89AB = _mm512_mul_ps(vx89AB, vscale);
vxCDEF = _mm512_mul_ps(vxCDEF, vscale);
vxGHIJ = _mm512_mul_ps(vxGHIJ, vscale);
vxKLMN = _mm512_mul_ps(vxKLMN, vscale);
vxOPQR = _mm512_mul_ps(vxOPQR, vscale);
vxSTUV = _mm512_mul_ps(vxSTUV, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
vx4567 = _mm512_min_ps(vx4567, voutput_max_less_zero_point);
vx89AB = _mm512_min_ps(vx89AB, voutput_max_less_zero_point);
vxCDEF = _mm512_min_ps(vxCDEF, voutput_max_less_zero_point);
vxGHIJ = _mm512_min_ps(vxGHIJ, voutput_max_less_zero_point);
vxKLMN = _mm512_min_ps(vxKLMN, voutput_max_less_zero_point);
vxOPQR = _mm512_min_ps(vxOPQR, voutput_max_less_zero_point);
vxSTUV = _mm512_min_ps(vxSTUV, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
const __m512i vacc4567 = _mm512_cvtps_epi32(vx4567);
const __m512i vacc89AB = _mm512_cvtps_epi32(vx89AB);
const __m512i vaccCDEF = _mm512_cvtps_epi32(vxCDEF);
const __m512i vaccGHIJ = _mm512_cvtps_epi32(vxGHIJ);
const __m512i vaccKLMN = _mm512_cvtps_epi32(vxKLMN);
const __m512i vaccOPQR = _mm512_cvtps_epi32(vxOPQR);
const __m512i vaccSTUV = _mm512_cvtps_epi32(vxSTUV);
__m512i vacc04152637 = _mm512_packs_epi32(vacc0123, vacc4567);
__m512i vacc8C9DAEBF = _mm512_packs_epi32(vacc89AB, vaccCDEF);
__m512i vaccGKHLIMJN = _mm512_packs_epi32(vaccGHIJ, vaccKLMN);
__m512i vaccOSPTQURV = _mm512_packs_epi32(vaccOPQR, vaccSTUV);
vacc04152637 = _mm512_adds_epi16(vacc04152637, voutput_zero_point);
vacc8C9DAEBF = _mm512_adds_epi16(vacc8C9DAEBF, voutput_zero_point);
vaccGKHLIMJN = _mm512_adds_epi16(vaccGKHLIMJN, voutput_zero_point);
vaccOSPTQURV = _mm512_adds_epi16(vaccOSPTQURV, voutput_zero_point);
__m512i vy048C159D26AE37BF = _mm512_packus_epi16(vacc04152637, vacc8C9DAEBF);
__m512i vyGKOSHLPTIMQUJNRV = _mm512_packus_epi16(vaccGKHLIMJN, vaccOSPTQURV);
vy048C159D26AE37BF = _mm512_max_epu8(vy048C159D26AE37BF, voutput_min);
vyGKOSHLPTIMQUJNRV = _mm512_max_epu8(vyGKOSHLPTIMQUJNRV, voutput_min);
const __m512i vy0123456789ABCDEF = _mm512_permutexvar_epi32(vshuffle512_mask, vy048C159D26AE37BF);
const __m512i vyGHIJKLMNOPQRSTUV = _mm512_permutexvar_epi32(vshuffle512_mask, vyGKOSHLPTIMQUJNRV);
_mm512_storeu_si512(output, vy0123456789ABCDEF);
_mm512_storeu_si512(output + 64, vyGHIJKLMNOPQRSTUV);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
input += 16;
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packus_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epu8(vy0123, _mm512_castsi512_si128(voutput_min));
_mm_storeu_si128((__m128i*) output, vy0123);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
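    // e.g. 5 remaining elements -> vmask == 0x001F (the 5 lowest lanes active).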
__m512 vx0123 = _mm512_maskz_loadu_ps(vmask, input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packus_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epu8(vy0123, _mm512_castsi512_si128(voutput_min));
_mm_mask_storeu_epi8(output, vmask, vy0123);
}
}
| 6,069 | 43.632353 | 117 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx512skx-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__avx512skx_x32(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vscale = _mm512_load_ps(params->avx512.scale);
const __m512 voutput_max_less_zero_point = _mm512_load_ps(params->avx512.output_max_less_zero_point);
const __m512i voutput_zero_point = _mm512_load_si512(params->avx512.output_zero_point);
const __m256i vshuffle256_mask = _mm256_load_si256((const __m256i*) params->avx512.shuffle256_mask);
const __m256i voutput_min = _mm256_load_si256((const __m256i*) params->avx512.output_min);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
__m512 vx4567 = _mm512_loadu_ps(input + 16);
input += 32;
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx4567 = _mm512_mul_ps(vx4567, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
vx4567 = _mm512_min_ps(vx4567, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
const __m512i vacc4567 = _mm512_cvtps_epi32(vx4567);
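    // Pack down to uint8 with saturation, adding the output zero point at the int16 stage.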
__m512i vacc04152637 = _mm512_packs_epi32(vacc0123, vacc4567);
vacc04152637 = _mm512_adds_epi16(vacc04152637, voutput_zero_point);
__m256i vy04261537 = _mm256_packus_epi16(_mm512_castsi512_si256(vacc04152637), _mm512_extracti32x8_epi32(vacc04152637, 1));
vy04261537 = _mm256_max_epu8(vy04261537, voutput_min);
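    // Restore the element order scrambled by the lane-wise packing.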
const __m256i vy01234567 = _mm256_permutevar8x32_epi32(vy04261537, vshuffle256_mask);
_mm256_storeu_si256((__m256i*) output, vy01234567);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
input += 16;
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packus_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epu8(vy0123, _mm256_castsi256_si128(voutput_min));
_mm_storeu_si128((__m128i*) output, vy0123);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx0123 = _mm512_maskz_loadu_ps(vmask, input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packus_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epu8(vy0123, _mm256_castsi256_si128(voutput_min));
_mm_mask_storeu_epi8(output, vmask, vy0123);
}
}
| 4,092 | 39.127451 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx512skx-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__avx512skx_x64(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vscale = _mm512_load_ps(params->avx512.scale);
const __m512 voutput_max_less_zero_point = _mm512_load_ps(params->avx512.output_max_less_zero_point);
const __m512i voutput_zero_point = _mm512_load_si512(params->avx512.output_zero_point);
const __m512i vshuffle512_mask = _mm512_load_si512(params->avx512.shuffle512_mask);
const __m512i voutput_min = _mm512_load_si512(params->avx512.output_min);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
__m512 vx4567 = _mm512_loadu_ps(input + 16);
__m512 vx89AB = _mm512_loadu_ps(input + 32);
__m512 vxCDEF = _mm512_loadu_ps(input + 48);
input += 64;
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx4567 = _mm512_mul_ps(vx4567, vscale);
vx89AB = _mm512_mul_ps(vx89AB, vscale);
vxCDEF = _mm512_mul_ps(vxCDEF, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
vx4567 = _mm512_min_ps(vx4567, voutput_max_less_zero_point);
vx89AB = _mm512_min_ps(vx89AB, voutput_max_less_zero_point);
vxCDEF = _mm512_min_ps(vxCDEF, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
const __m512i vacc4567 = _mm512_cvtps_epi32(vx4567);
const __m512i vacc89AB = _mm512_cvtps_epi32(vx89AB);
const __m512i vaccCDEF = _mm512_cvtps_epi32(vxCDEF);
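    // Pack down to uint8 with saturation, adding the output zero point at the int16 stage.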
__m512i vacc04152637 = _mm512_packs_epi32(vacc0123, vacc4567);
__m512i vacc8C9DAEBF = _mm512_packs_epi32(vacc89AB, vaccCDEF);
vacc04152637 = _mm512_adds_epi16(vacc04152637, voutput_zero_point);
vacc8C9DAEBF = _mm512_adds_epi16(vacc8C9DAEBF, voutput_zero_point);
__m512i vy048C159D26AE37BF = _mm512_packus_epi16(vacc04152637, vacc8C9DAEBF);
vy048C159D26AE37BF = _mm512_max_epu8(vy048C159D26AE37BF, voutput_min);
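    // Restore the element order with a cross-lane 32-bit permutation.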
const __m512i vy0123456789ABCDEF = _mm512_permutexvar_epi32(vshuffle512_mask, vy048C159D26AE37BF);
_mm512_storeu_si512(output, vy0123456789ABCDEF);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
input += 16;
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packus_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epu8(vy0123, _mm512_castsi512_si128(voutput_min));
_mm_storeu_si128((__m128i*) output, vy0123);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx0123 = _mm512_maskz_loadu_ps(vmask, input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packus_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epu8(vy0123, _mm512_castsi512_si128(voutput_min));
_mm_mask_storeu_epi8(output, vmask, vy0123);
}
}
| 4,607 | 40.142857 | 117 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx512skx-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-qs8-vcvt/avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_f32_qu8_vcvt_ukernel__avx512skx_x96(
size_t batch,
const float* input,
uint8_t* output,
const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vscale = _mm512_load_ps(params->avx512.scale);
const __m512 voutput_max_less_zero_point = _mm512_load_ps(params->avx512.output_max_less_zero_point);
const __m512i voutput_zero_point = _mm512_load_si512(params->avx512.output_zero_point);
const __m512i vshuffle512_mask = _mm512_load_si512(params->avx512.shuffle512_mask);
const __m256i vshuffle256_mask = _mm256_load_si256((const __m256i*) params->avx512.shuffle256_mask);
const __m512i voutput_min = _mm512_load_si512(params->avx512.output_min);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
__m512 vx4567 = _mm512_loadu_ps(input + 16);
__m512 vx89AB = _mm512_loadu_ps(input + 32);
__m512 vxCDEF = _mm512_loadu_ps(input + 48);
__m512 vxGHIJ = _mm512_loadu_ps(input + 64);
__m512 vxKLMN = _mm512_loadu_ps(input + 80);
input += 96;
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx4567 = _mm512_mul_ps(vx4567, vscale);
vx89AB = _mm512_mul_ps(vx89AB, vscale);
vxCDEF = _mm512_mul_ps(vxCDEF, vscale);
vxGHIJ = _mm512_mul_ps(vxGHIJ, vscale);
vxKLMN = _mm512_mul_ps(vxKLMN, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
vx4567 = _mm512_min_ps(vx4567, voutput_max_less_zero_point);
vx89AB = _mm512_min_ps(vx89AB, voutput_max_less_zero_point);
vxCDEF = _mm512_min_ps(vxCDEF, voutput_max_less_zero_point);
vxGHIJ = _mm512_min_ps(vxGHIJ, voutput_max_less_zero_point);
vxKLMN = _mm512_min_ps(vxKLMN, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
const __m512i vacc4567 = _mm512_cvtps_epi32(vx4567);
const __m512i vacc89AB = _mm512_cvtps_epi32(vx89AB);
const __m512i vaccCDEF = _mm512_cvtps_epi32(vxCDEF);
const __m512i vaccGHIJ = _mm512_cvtps_epi32(vxGHIJ);
const __m512i vaccKLMN = _mm512_cvtps_epi32(vxKLMN);
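    // Pack down to uint8 with saturation, adding the output zero point at the int16 stage.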
__m512i vacc04152637 = _mm512_packs_epi32(vacc0123, vacc4567);
__m512i vacc8C9DAEBF = _mm512_packs_epi32(vacc89AB, vaccCDEF);
__m512i vaccGKHLIMJN = _mm512_packs_epi32(vaccGHIJ, vaccKLMN);
vacc04152637 = _mm512_adds_epi16(vacc04152637, voutput_zero_point);
vacc8C9DAEBF = _mm512_adds_epi16(vacc8C9DAEBF, voutput_zero_point);
vaccGKHLIMJN = _mm512_adds_epi16(vaccGKHLIMJN, voutput_zero_point);
__m512i vy048C159D26AE37BF = _mm512_packus_epi16(vacc04152637, vacc8C9DAEBF);
__m256i vyGKIMHLJN = _mm256_packus_epi16(_mm512_castsi512_si256(vaccGKHLIMJN), _mm512_extracti32x8_epi32(vaccGKHLIMJN, 1));
vy048C159D26AE37BF = _mm512_max_epu8(vy048C159D26AE37BF, voutput_min);
vyGKIMHLJN = _mm256_max_epu8(vyGKIMHLJN, _mm512_castsi512_si256(voutput_min));
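    // Restore the element order: a 512-bit cross-lane permutation for the first 64 bytes and a 256-bit permutation for the remaining 32.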
const __m512i vy0123456789ABCDEF = _mm512_permutexvar_epi32(vshuffle512_mask, vy048C159D26AE37BF);
const __m256i vyGHIJKLMN = _mm256_permutevar8x32_epi32(vyGKIMHLJN, vshuffle256_mask);
_mm512_storeu_si512(output, vy0123456789ABCDEF);
_mm256_storeu_si256((__m256i*) (output + 64), vyGHIJKLMN);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx0123 = _mm512_loadu_ps(input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
input += 16;
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packus_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epu8(vy0123, _mm512_castsi512_si128(voutput_min));
_mm_storeu_si128((__m128i*) output, vy0123);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx0123 = _mm512_maskz_loadu_ps(vmask, input);
vx0123 = _mm512_mul_ps(vx0123, vscale);
vx0123 = _mm512_min_ps(vx0123, voutput_max_less_zero_point);
const __m512i vacc0123 = _mm512_cvtps_epi32(vx0123);
__m256i vacc0213 = _mm256_packs_epi32(_mm512_castsi512_si256(vacc0123), _mm512_extracti32x8_epi32(vacc0123, 1));
vacc0213 = _mm256_adds_epi16(vacc0213, _mm512_castsi512_si256(voutput_zero_point));
const __m128i vy0213 = _mm_packus_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));
__m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));
vy0123 = _mm_max_epu8(vy0123, _mm512_castsi512_si128(voutput_min));
_mm_mask_storeu_epi8(output, vmask, vy0123);
}
}
| 5,643 | 43.440945 | 127 |
c
|