Dataset columns: repo (string, 1–152 chars) | file (string, 14–221 chars) | code (string, 501–25k chars) | file_length (int64, 501–25k) | avg_line_length (float64, 20–99.5) | max_line_length (int64, 21–134) | extension_type (string, 2 classes)
XNNPACK
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-4x8c2-minmax-neonbf16-bfdot-lane-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/bf16-gemm/c2-neonbf16-bfdot-lane-ld128.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>


void xnn_bf16_gemm_minmax_ukernel_4x8c2__neonbf16_bfdot_lane_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w_ptr,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(bfloat16_t) == 0);
  assert(a != NULL);
  assert(w_ptr != NULL);
  assert(c != NULL);

  const bfloat16_t* a0 = (const bfloat16_t*) a;
  bfloat16_t* c0 = (bfloat16_t*) c;
  const bfloat16_t* a1 = (const bfloat16_t*) ((uintptr_t) a0 + a_stride);
  bfloat16_t* c1 = (bfloat16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const bfloat16_t* a2 = (const bfloat16_t*) ((uintptr_t) a1 + a_stride);
  bfloat16_t* c2 = (bfloat16_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const bfloat16_t* a3 = (const bfloat16_t*) ((uintptr_t) a2 + a_stride);
  bfloat16_t* c3 = (bfloat16_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  const bfloat16_t* w = (const bfloat16_t*) w_ptr;
  do {
    float32x4_t vacc0x0123 = vcvt_f32_bf16(vld1_bf16(w)); w += 4;
    float32x4_t vacc0x4567 = vcvt_f32_bf16(vld1_bf16(w)); w += 4;
    float32x4_t vacc1x0123 = vacc0x0123;
    float32x4_t vacc1x4567 = vacc0x4567;
    float32x4_t vacc2x0123 = vacc0x0123;
    float32x4_t vacc2x4567 = vacc0x4567;
    float32x4_t vacc3x0123 = vacc0x0123;
    float32x4_t vacc3x4567 = vacc0x4567;

    size_t k = kc;
    for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
      const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
      const bfloat16x8_t va1 = vld1q_bf16(a1); a1 += 8;
      const bfloat16x8_t va2 = vld1q_bf16(a2); a2 += 8;
      const bfloat16x8_t va3 = vld1q_bf16(a3); a3 += 8;

      const bfloat16x8_t vb0123c01 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb4567c01 = vld1q_bf16(w); w += 8;

      vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c01, va0, 0);
      vacc1x0123 = vbfdotq_laneq_f32(vacc1x0123, vb0123c01, va1, 0);
      vacc2x0123 = vbfdotq_laneq_f32(vacc2x0123, vb0123c01, va2, 0);
      vacc3x0123 = vbfdotq_laneq_f32(vacc3x0123, vb0123c01, va3, 0);
      vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c01, va0, 0);
      vacc1x4567 = vbfdotq_laneq_f32(vacc1x4567, vb4567c01, va1, 0);
      vacc2x4567 = vbfdotq_laneq_f32(vacc2x4567, vb4567c01, va2, 0);
      vacc3x4567 = vbfdotq_laneq_f32(vacc3x4567, vb4567c01, va3, 0);

      const bfloat16x8_t vb0123c23 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb4567c23 = vld1q_bf16(w); w += 8;

      vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c23, va0, 1);
      vacc1x0123 = vbfdotq_laneq_f32(vacc1x0123, vb0123c23, va1, 1);
      vacc2x0123 = vbfdotq_laneq_f32(vacc2x0123, vb0123c23, va2, 1);
      vacc3x0123 = vbfdotq_laneq_f32(vacc3x0123, vb0123c23, va3, 1);
      vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c23, va0, 1);
      vacc1x4567 = vbfdotq_laneq_f32(vacc1x4567, vb4567c23, va1, 1);
      vacc2x4567 = vbfdotq_laneq_f32(vacc2x4567, vb4567c23, va2, 1);
      vacc3x4567 = vbfdotq_laneq_f32(vacc3x4567, vb4567c23, va3, 1);

      const bfloat16x8_t vb0123c45 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb4567c45 = vld1q_bf16(w); w += 8;

      vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c45, va0, 2);
      vacc1x0123 = vbfdotq_laneq_f32(vacc1x0123, vb0123c45, va1, 2);
      vacc2x0123 = vbfdotq_laneq_f32(vacc2x0123, vb0123c45, va2, 2);
      vacc3x0123 = vbfdotq_laneq_f32(vacc3x0123, vb0123c45, va3, 2);
      vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c45, va0, 2);
      vacc1x4567 = vbfdotq_laneq_f32(vacc1x4567, vb4567c45, va1, 2);
      vacc2x4567 = vbfdotq_laneq_f32(vacc2x4567, vb4567c45, va2, 2);
      vacc3x4567 = vbfdotq_laneq_f32(vacc3x4567, vb4567c45, va3, 2);

      const bfloat16x8_t vb0123c67 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb4567c67 = vld1q_bf16(w); w += 8;

      vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c67, va0, 3);
      vacc1x0123 = vbfdotq_laneq_f32(vacc1x0123, vb0123c67, va1, 3);
      vacc2x0123 = vbfdotq_laneq_f32(vacc2x0123, vb0123c67, va2, 3);
      vacc3x0123 = vbfdotq_laneq_f32(vacc3x0123, vb0123c67, va3, 3);
      vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c67, va0, 3);
      vacc1x4567 = vbfdotq_laneq_f32(vacc1x4567, vb4567c67, va1, 3);
      vacc2x4567 = vbfdotq_laneq_f32(vacc2x4567, vb4567c67, va2, 3);
      vacc3x4567 = vbfdotq_laneq_f32(vacc3x4567, vb4567c67, va3, 3);
    }
    if XNN_UNLIKELY(k != 0) {
      const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
      const bfloat16x8_t va1 = vld1q_bf16(a1); a1 = (const bfloat16_t*) ((uintptr_t) a1 + k);
      const bfloat16x8_t va2 = vld1q_bf16(a2); a2 = (const bfloat16_t*) ((uintptr_t) a2 + k);
      const bfloat16x8_t va3 = vld1q_bf16(a3); a3 = (const bfloat16_t*) ((uintptr_t) a3 + k);

      const bfloat16x8_t vb0123c01 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb4567c01 = vld1q_bf16(w); w += 8;

      const uint32x4_t va0c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va0)), 0);
      const uint32x4_t va1c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va1)), 0);
      const uint32x4_t va2c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va2)), 0);
      const uint32x4_t va3c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va3)), 0);

      const uint32x4_t vm0123c01 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c01), vmovq_n_u16(0)));
      const uint32x4_t vm4567c01 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c01), vmovq_n_u16(0)));

      const uint32x4_t va0x0123c01 = vbicq_u32(va0c01, vm0123c01);
      vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c01, vreinterpretq_bf16_u32(va0x0123c01));
      const uint32x4_t va1x0123c01 = vbicq_u32(va1c01, vm0123c01);
      vacc1x0123 = vbfdotq_f32(vacc1x0123, vb0123c01, vreinterpretq_bf16_u32(va1x0123c01));
      const uint32x4_t va2x0123c01 = vbicq_u32(va2c01, vm0123c01);
      vacc2x0123 = vbfdotq_f32(vacc2x0123, vb0123c01, vreinterpretq_bf16_u32(va2x0123c01));
      const uint32x4_t va3x0123c01 = vbicq_u32(va3c01, vm0123c01);
      vacc3x0123 = vbfdotq_f32(vacc3x0123, vb0123c01, vreinterpretq_bf16_u32(va3x0123c01));
      const uint32x4_t va0x4567c01 = vbicq_u32(va0c01, vm4567c01);
      vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c01, vreinterpretq_bf16_u32(va0x4567c01));
      const uint32x4_t va1x4567c01 = vbicq_u32(va1c01, vm4567c01);
      vacc1x4567 = vbfdotq_f32(vacc1x4567, vb4567c01, vreinterpretq_bf16_u32(va1x4567c01));
      const uint32x4_t va2x4567c01 = vbicq_u32(va2c01, vm4567c01);
      vacc2x4567 = vbfdotq_f32(vacc2x4567, vb4567c01, vreinterpretq_bf16_u32(va2x4567c01));
      const uint32x4_t va3x4567c01 = vbicq_u32(va3c01, vm4567c01);
      vacc3x4567 = vbfdotq_f32(vacc3x4567, vb4567c01, vreinterpretq_bf16_u32(va3x4567c01));

      if (k > 2 * sizeof(bfloat16_t)) {
        const bfloat16x8_t vb0123c23 = vld1q_bf16(w); w += 8;
        const bfloat16x8_t vb4567c23 = vld1q_bf16(w); w += 8;

        const uint32x4_t va0c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va0)), 1);
        const uint32x4_t va1c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va1)), 1);
        const uint32x4_t va2c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va2)), 1);
        const uint32x4_t va3c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va3)), 1);

        const uint32x4_t vm0123c23 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c23), vmovq_n_u16(0)));
        const uint32x4_t vm4567c23 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c23), vmovq_n_u16(0)));

        const uint32x4_t va0x0123c23 = vbicq_u32(va0c23, vm0123c23);
        vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c23, vreinterpretq_bf16_u32(va0x0123c23));
        const uint32x4_t va1x0123c23 = vbicq_u32(va1c23, vm0123c23);
        vacc1x0123 = vbfdotq_f32(vacc1x0123, vb0123c23, vreinterpretq_bf16_u32(va1x0123c23));
        const uint32x4_t va2x0123c23 = vbicq_u32(va2c23, vm0123c23);
        vacc2x0123 = vbfdotq_f32(vacc2x0123, vb0123c23, vreinterpretq_bf16_u32(va2x0123c23));
        const uint32x4_t va3x0123c23 = vbicq_u32(va3c23, vm0123c23);
        vacc3x0123 = vbfdotq_f32(vacc3x0123, vb0123c23, vreinterpretq_bf16_u32(va3x0123c23));
        const uint32x4_t va0x4567c23 = vbicq_u32(va0c23, vm4567c23);
        vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c23, vreinterpretq_bf16_u32(va0x4567c23));
        const uint32x4_t va1x4567c23 = vbicq_u32(va1c23, vm4567c23);
        vacc1x4567 = vbfdotq_f32(vacc1x4567, vb4567c23, vreinterpretq_bf16_u32(va1x4567c23));
        const uint32x4_t va2x4567c23 = vbicq_u32(va2c23, vm4567c23);
        vacc2x4567 = vbfdotq_f32(vacc2x4567, vb4567c23, vreinterpretq_bf16_u32(va2x4567c23));
        const uint32x4_t va3x4567c23 = vbicq_u32(va3c23, vm4567c23);
        vacc3x4567 = vbfdotq_f32(vacc3x4567, vb4567c23, vreinterpretq_bf16_u32(va3x4567c23));

        if (k > 4 * sizeof(bfloat16_t)) {
          const bfloat16x8_t vb0123c45 = vld1q_bf16(w); w += 8;
          const bfloat16x8_t vb4567c45 = vld1q_bf16(w); w += 8;

          const uint32x4_t va0c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va0)), 0);
          const uint32x4_t va1c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va1)), 0);
          const uint32x4_t va2c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va2)), 0);
          const uint32x4_t va3c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va3)), 0);

          const uint32x4_t vm0123c45 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c45), vmovq_n_u16(0)));
          const uint32x4_t vm4567c45 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c45), vmovq_n_u16(0)));

          const uint32x4_t va0x0123c45 = vbicq_u32(va0c45, vm0123c45);
          vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c45, vreinterpretq_bf16_u32(va0x0123c45));
          const uint32x4_t va1x0123c45 = vbicq_u32(va1c45, vm0123c45);
          vacc1x0123 = vbfdotq_f32(vacc1x0123, vb0123c45, vreinterpretq_bf16_u32(va1x0123c45));
          const uint32x4_t va2x0123c45 = vbicq_u32(va2c45, vm0123c45);
          vacc2x0123 = vbfdotq_f32(vacc2x0123, vb0123c45, vreinterpretq_bf16_u32(va2x0123c45));
          const uint32x4_t va3x0123c45 = vbicq_u32(va3c45, vm0123c45);
          vacc3x0123 = vbfdotq_f32(vacc3x0123, vb0123c45, vreinterpretq_bf16_u32(va3x0123c45));
          const uint32x4_t va0x4567c45 = vbicq_u32(va0c45, vm4567c45);
          vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c45, vreinterpretq_bf16_u32(va0x4567c45));
          const uint32x4_t va1x4567c45 = vbicq_u32(va1c45, vm4567c45);
          vacc1x4567 = vbfdotq_f32(vacc1x4567, vb4567c45, vreinterpretq_bf16_u32(va1x4567c45));
          const uint32x4_t va2x4567c45 = vbicq_u32(va2c45, vm4567c45);
          vacc2x4567 = vbfdotq_f32(vacc2x4567, vb4567c45, vreinterpretq_bf16_u32(va2x4567c45));
          const uint32x4_t va3x4567c45 = vbicq_u32(va3c45, vm4567c45);
          vacc3x4567 = vbfdotq_f32(vacc3x4567, vb4567c45, vreinterpretq_bf16_u32(va3x4567c45));

          if (k > 6 * sizeof(bfloat16_t)) {
            const bfloat16x8_t vb0123c67 = vld1q_bf16(w); w += 8;
            const bfloat16x8_t vb4567c67 = vld1q_bf16(w); w += 8;

            const uint32x4_t va0c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va0)), 1);
            const uint32x4_t va1c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va1)), 1);
            const uint32x4_t va2c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va2)), 1);
            const uint32x4_t va3c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va3)), 1);

            const uint32x4_t vm0123c67 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c67), vmovq_n_u16(0)));
            const uint32x4_t vm4567c67 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c67), vmovq_n_u16(0)));

            const uint32x4_t va0x0123c67 = vbicq_u32(va0c67, vm0123c67);
            vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c67, vreinterpretq_bf16_u32(va0x0123c67));
            const uint32x4_t va1x0123c67 = vbicq_u32(va1c67, vm0123c67);
            vacc1x0123 = vbfdotq_f32(vacc1x0123, vb0123c67, vreinterpretq_bf16_u32(va1x0123c67));
            const uint32x4_t va2x0123c67 = vbicq_u32(va2c67, vm0123c67);
            vacc2x0123 = vbfdotq_f32(vacc2x0123, vb0123c67, vreinterpretq_bf16_u32(va2x0123c67));
            const uint32x4_t va3x0123c67 = vbicq_u32(va3c67, vm0123c67);
            vacc3x0123 = vbfdotq_f32(vacc3x0123, vb0123c67, vreinterpretq_bf16_u32(va3x0123c67));
            const uint32x4_t va0x4567c67 = vbicq_u32(va0c67, vm4567c67);
            vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c67, vreinterpretq_bf16_u32(va0x4567c67));
            const uint32x4_t va1x4567c67 = vbicq_u32(va1c67, vm4567c67);
            vacc1x4567 = vbfdotq_f32(vacc1x4567, vb4567c67, vreinterpretq_bf16_u32(va1x4567c67));
            const uint32x4_t va2x4567c67 = vbicq_u32(va2c67, vm4567c67);
            vacc2x4567 = vbfdotq_f32(vacc2x4567, vb4567c67, vreinterpretq_bf16_u32(va2x4567c67));
            const uint32x4_t va3x4567c67 = vbicq_u32(va3c67, vm4567c67);
            vacc3x4567 = vbfdotq_f32(vacc3x4567, vb4567c67, vreinterpretq_bf16_u32(va3x4567c67));
          }
        }
      }
    }

    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
    vacc3x4567 = vminq_f32(vacc3x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);

    bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
    bfloat16x4_t vout1x0123 = vcvt_bf16_f32(vacc1x0123);
    bfloat16x4_t vout2x0123 = vcvt_bf16_f32(vacc2x0123);
    bfloat16x4_t vout3x0123 = vcvt_bf16_f32(vacc3x0123);
    bfloat16x4_t vout0x4567 = vcvt_bf16_f32(vacc0x4567);
    bfloat16x4_t vout1x4567 = vcvt_bf16_f32(vacc1x4567);
    bfloat16x4_t vout2x4567 = vcvt_bf16_f32(vacc2x4567);
    bfloat16x4_t vout3x4567 = vcvt_bf16_f32(vacc3x4567);

    if XNN_LIKELY(nc >= 8) {
      vst1_bf16(c0, vout0x0123);
      vst1_bf16(c0 + 4, vout0x4567);
      c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
      vst1_bf16(c1, vout1x0123);
      vst1_bf16(c1 + 4, vout1x4567);
      c1 = (bfloat16_t*) ((uintptr_t) c1 + cn_stride);
      vst1_bf16(c2, vout2x0123);
      vst1_bf16(c2 + 4, vout2x4567);
      c2 = (bfloat16_t*) ((uintptr_t) c2 + cn_stride);
      vst1_bf16(c3, vout3x0123);
      vst1_bf16(c3 + 4, vout3x4567);
      c3 = (bfloat16_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
      a1 = (const bfloat16_t*) ((uintptr_t) a1 - kc);
      a2 = (const bfloat16_t*) ((uintptr_t) a2 - kc);
      a3 = (const bfloat16_t*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
      if (nc & 4) {
        vst1_bf16(c0, vout0x0123); c0 += 4;
        vst1_bf16(c1, vout1x0123); c1 += 4;
        vst1_bf16(c2, vout2x0123); c2 += 4;
        vst1_bf16(c3, vout3x0123); c3 += 4;

        vout0x0123 = vout0x4567;
        vout1x0123 = vout1x4567;
        vout2x0123 = vout2x4567;
        vout3x0123 = vout3x4567;
      }
      if (nc & 2) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_bf16(vout1x0123), 0); c1 += 2;
        vst1_lane_u32((void*) c2, vreinterpret_u32_bf16(vout2x0123), 0); c2 += 2;
        vst1_lane_u32((void*) c3, vreinterpret_u32_bf16(vout3x0123), 0); c3 += 2;

        vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
        vout1x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout1x0123), vreinterpret_u16_bf16(vout1x0123), 2));
        vout2x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout2x0123), vreinterpret_u16_bf16(vout2x0123), 2));
        vout3x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout3x0123), vreinterpret_u16_bf16(vout3x0123), 2));
      }
      if (nc & 1) {
        vst1_lane_bf16(c0, vout0x0123, 0);
        vst1_lane_bf16(c1, vout1x0123, 0);
        vst1_lane_bf16(c2, vout2x0123, 0);
        vst1_lane_bf16(c3, vout3x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
file_length: 17,488 | avg_line_length: 51.836858 | max_line_length: 126 | extension_type: c
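The kernel above is easier to follow with a scalar model of the BFDOT primitive it is built on. The sketch below is a minimal, hypothetical reference (not part of XNNPACK): it shows how one f32 lane of vbfdotq_f32 accumulates the dot product of one pair of adjacent bf16 elements, with a bf16 value widened to f32 by placing its 16 bits in the high half of the f32 bit pattern. The real instruction may round or fuse intermediates differently.

#include <stdint.h>

/* Hypothetical widening of one bf16 value (stored as raw bits) to f32. */
static float bf16_to_f32(uint16_t bits) {
  union { uint32_t u; float f; } v = { .u = (uint32_t) bits << 16 };
  return v.f;
}

/* Hypothetical scalar reference for one 32-bit lane of vbfdotq_f32:
   each f32 accumulator lane gains a 2-element bf16 dot product. */
static float bfdot_lane(float acc, const uint16_t a[2], const uint16_t b[2]) {
  return acc + bf16_to_f32(a[0]) * bf16_to_f32(b[0])
             + bf16_to_f32(a[1]) * bf16_to_f32(b[1]);
}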
XNNPACK
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-5x4c8-minmax-neonbf16-bfdot.c
// Auto-generated file. Do not edit!
//   Template: src/bf16-gemm/c8-neonbf16.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>


void xnn_bf16_gemm_minmax_ukernel_5x4c8__neonbf16_bfdot(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w_ptr,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(bfloat16_t) == 0);
  assert(a != NULL);
  assert(w_ptr != NULL);
  assert(c != NULL);

  const bfloat16_t* a0 = (const bfloat16_t*) a;
  bfloat16_t* c0 = (bfloat16_t*) c;
  const bfloat16_t* a1 = (const bfloat16_t*) ((uintptr_t) a0 + a_stride);
  bfloat16_t* c1 = (bfloat16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const bfloat16_t* a2 = (const bfloat16_t*) ((uintptr_t) a1 + a_stride);
  bfloat16_t* c2 = (bfloat16_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const bfloat16_t* a3 = (const bfloat16_t*) ((uintptr_t) a2 + a_stride);
  bfloat16_t* c3 = (bfloat16_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const bfloat16_t* a4 = (const bfloat16_t*) ((uintptr_t) a3 + a_stride);
  bfloat16_t* c4 = (bfloat16_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  const bfloat16_t* w = (const bfloat16_t*) w_ptr;
  do {
    float32x4_t vacc0x0 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x1 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x2 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x3 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc1x0 = vacc0x0;
    float32x4_t vacc1x1 = vacc0x1;
    float32x4_t vacc1x2 = vacc0x2;
    float32x4_t vacc1x3 = vacc0x3;
    float32x4_t vacc2x0 = vacc0x0;
    float32x4_t vacc2x1 = vacc0x1;
    float32x4_t vacc2x2 = vacc0x2;
    float32x4_t vacc2x3 = vacc0x3;
    float32x4_t vacc3x0 = vacc0x0;
    float32x4_t vacc3x1 = vacc0x1;
    float32x4_t vacc3x2 = vacc0x2;
    float32x4_t vacc3x3 = vacc0x3;
    float32x4_t vacc4x0 = vacc0x0;
    float32x4_t vacc4x1 = vacc0x1;
    float32x4_t vacc4x2 = vacc0x2;
    float32x4_t vacc4x3 = vacc0x3;

    size_t k = kc;
    for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
      const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
      const bfloat16x8_t va1 = vld1q_bf16(a1); a1 += 8;
      const bfloat16x8_t va2 = vld1q_bf16(a2); a2 += 8;
      const bfloat16x8_t va3 = vld1q_bf16(a3); a3 += 8;
      const bfloat16x8_t va4 = vld1q_bf16(a4); a4 += 8;

      const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;

      vacc0x0 = vbfdotq_f32(vacc0x0, va0, vb0);
      vacc1x0 = vbfdotq_f32(vacc1x0, va1, vb0);
      vacc2x0 = vbfdotq_f32(vacc2x0, va2, vb0);
      vacc3x0 = vbfdotq_f32(vacc3x0, va3, vb0);
      vacc4x0 = vbfdotq_f32(vacc4x0, va4, vb0);
      vacc0x1 = vbfdotq_f32(vacc0x1, va0, vb1);
      vacc1x1 = vbfdotq_f32(vacc1x1, va1, vb1);
      vacc2x1 = vbfdotq_f32(vacc2x1, va2, vb1);
      vacc3x1 = vbfdotq_f32(vacc3x1, va3, vb1);
      vacc4x1 = vbfdotq_f32(vacc4x1, va4, vb1);
      vacc0x2 = vbfdotq_f32(vacc0x2, va0, vb2);
      vacc1x2 = vbfdotq_f32(vacc1x2, va1, vb2);
      vacc2x2 = vbfdotq_f32(vacc2x2, va2, vb2);
      vacc3x2 = vbfdotq_f32(vacc3x2, va3, vb2);
      vacc4x2 = vbfdotq_f32(vacc4x2, va4, vb2);
      vacc0x3 = vbfdotq_f32(vacc0x3, va0, vb3);
      vacc1x3 = vbfdotq_f32(vacc1x3, va1, vb3);
      vacc2x3 = vbfdotq_f32(vacc2x3, va2, vb3);
      vacc3x3 = vbfdotq_f32(vacc3x3, va3, vb3);
      vacc4x3 = vbfdotq_f32(vacc4x3, va4, vb3);
    }
    if XNN_UNLIKELY(k != 0) {
      const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
      const bfloat16x8_t va1 = vld1q_bf16(a1); a1 = (const bfloat16_t*) ((uintptr_t) a1 + k);
      const bfloat16x8_t va2 = vld1q_bf16(a2); a2 = (const bfloat16_t*) ((uintptr_t) a2 + k);
      const bfloat16x8_t va3 = vld1q_bf16(a3); a3 = (const bfloat16_t*) ((uintptr_t) a3 + k);
      const bfloat16x8_t va4 = vld1q_bf16(a4); a4 = (const bfloat16_t*) ((uintptr_t) a4 + k);

      const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;

      const uint16x8_t vm0 = vceqq_u16(vreinterpretq_u16_bf16(vb0), vmovq_n_u16(0));
      const uint16x8_t vm1 = vceqq_u16(vreinterpretq_u16_bf16(vb1), vmovq_n_u16(0));
      const uint16x8_t vm2 = vceqq_u16(vreinterpretq_u16_bf16(vb2), vmovq_n_u16(0));
      const uint16x8_t vm3 = vceqq_u16(vreinterpretq_u16_bf16(vb3), vmovq_n_u16(0));

      const bfloat16x8_t va0x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm0));
      vacc0x0 = vbfdotq_f32(vacc0x0, va0x0, vb0);
      const bfloat16x8_t va1x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm0));
      vacc1x0 = vbfdotq_f32(vacc1x0, va1x0, vb0);
      const bfloat16x8_t va2x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm0));
      vacc2x0 = vbfdotq_f32(vacc2x0, va2x0, vb0);
      const bfloat16x8_t va3x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm0));
      vacc3x0 = vbfdotq_f32(vacc3x0, va3x0, vb0);
      const bfloat16x8_t va4x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va4), vm0));
      vacc4x0 = vbfdotq_f32(vacc4x0, va4x0, vb0);
      const bfloat16x8_t va0x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm1));
      vacc0x1 = vbfdotq_f32(vacc0x1, va0x1, vb1);
      const bfloat16x8_t va1x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm1));
      vacc1x1 = vbfdotq_f32(vacc1x1, va1x1, vb1);
      const bfloat16x8_t va2x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm1));
      vacc2x1 = vbfdotq_f32(vacc2x1, va2x1, vb1);
      const bfloat16x8_t va3x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm1));
      vacc3x1 = vbfdotq_f32(vacc3x1, va3x1, vb1);
      const bfloat16x8_t va4x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va4), vm1));
      vacc4x1 = vbfdotq_f32(vacc4x1, va4x1, vb1);
      const bfloat16x8_t va0x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm2));
      vacc0x2 = vbfdotq_f32(vacc0x2, va0x2, vb2);
      const bfloat16x8_t va1x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm2));
      vacc1x2 = vbfdotq_f32(vacc1x2, va1x2, vb2);
      const bfloat16x8_t va2x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm2));
      vacc2x2 = vbfdotq_f32(vacc2x2, va2x2, vb2);
      const bfloat16x8_t va3x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm2));
      vacc3x2 = vbfdotq_f32(vacc3x2, va3x2, vb2);
      const bfloat16x8_t va4x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va4), vm2));
      vacc4x2 = vbfdotq_f32(vacc4x2, va4x2, vb2);
      const bfloat16x8_t va0x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm3));
      vacc0x3 = vbfdotq_f32(vacc0x3, va0x3, vb3);
      const bfloat16x8_t va1x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm3));
      vacc1x3 = vbfdotq_f32(vacc1x3, va1x3, vb3);
      const bfloat16x8_t va2x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm3));
      vacc2x3 = vbfdotq_f32(vacc2x3, va2x3, vb3);
      const bfloat16x8_t va3x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm3));
      vacc3x3 = vbfdotq_f32(vacc3x3, va3x3, vb3);
      const bfloat16x8_t va4x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va4), vm3));
      vacc4x3 = vbfdotq_f32(vacc4x3, va4x3, vb3);
    }

#if XNN_ARCH_ARM64
    const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
    const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
    const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
    const float32x4_t vacc3x01 = vpaddq_f32(vacc3x0, vacc3x1);
    const float32x4_t vacc4x01 = vpaddq_f32(vacc4x0, vacc4x1);
    const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
    const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
    const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);
    const float32x4_t vacc3x23 = vpaddq_f32(vacc3x2, vacc3x3);
    const float32x4_t vacc4x23 = vpaddq_f32(vacc4x2, vacc4x3);

    float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
    float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
    float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
    float32x4_t vacc3x0123 = vpaddq_f32(vacc3x01, vacc3x23);
    float32x4_t vacc4x0123 = vpaddq_f32(vacc4x01, vacc4x23);
#else
    const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
    const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
    const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
    const float32x2_t vsum3x0 = vadd_f32(vget_low_f32(vacc3x0), vget_high_f32(vacc3x0));
    const float32x2_t vsum4x0 = vadd_f32(vget_low_f32(vacc4x0), vget_high_f32(vacc4x0));
    const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
    const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
    const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
    const float32x2_t vsum3x1 = vadd_f32(vget_low_f32(vacc3x1), vget_high_f32(vacc3x1));
    const float32x2_t vsum4x1 = vadd_f32(vget_low_f32(vacc4x1), vget_high_f32(vacc4x1));
    const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
    const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
    const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
    const float32x2_t vsum3x2 = vadd_f32(vget_low_f32(vacc3x2), vget_high_f32(vacc3x2));
    const float32x2_t vsum4x2 = vadd_f32(vget_low_f32(vacc4x2), vget_high_f32(vacc4x2));
    const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
    const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
    const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));
    const float32x2_t vsum3x3 = vadd_f32(vget_low_f32(vacc3x3), vget_high_f32(vacc3x3));
    const float32x2_t vsum4x3 = vadd_f32(vget_low_f32(vacc4x3), vget_high_f32(vacc4x3));

    float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
    float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
    float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
    float32x4_t vacc3x0123 = vcombine_f32(vpadd_f32(vsum3x0, vsum3x1), vpadd_f32(vsum3x2, vsum3x3));
    float32x4_t vacc4x0123 = vcombine_f32(vpadd_f32(vsum4x0, vsum4x1), vpadd_f32(vsum4x2, vsum4x3));
#endif

    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc4x0123 = vminq_f32(vacc4x0123, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);

    bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
    bfloat16x4_t vout1x0123 = vcvt_bf16_f32(vacc1x0123);
    bfloat16x4_t vout2x0123 = vcvt_bf16_f32(vacc2x0123);
    bfloat16x4_t vout3x0123 = vcvt_bf16_f32(vacc3x0123);
    bfloat16x4_t vout4x0123 = vcvt_bf16_f32(vacc4x0123);

    if XNN_LIKELY(nc >= 4) {
      vst1_bf16(c0, vout0x0123);
      c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
      vst1_bf16(c1, vout1x0123);
      c1 = (bfloat16_t*) ((uintptr_t) c1 + cn_stride);
      vst1_bf16(c2, vout2x0123);
      c2 = (bfloat16_t*) ((uintptr_t) c2 + cn_stride);
      vst1_bf16(c3, vout3x0123);
      c3 = (bfloat16_t*) ((uintptr_t) c3 + cn_stride);
      vst1_bf16(c4, vout4x0123);
      c4 = (bfloat16_t*) ((uintptr_t) c4 + cn_stride);

      a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
      a1 = (const bfloat16_t*) ((uintptr_t) a1 - kc);
      a2 = (const bfloat16_t*) ((uintptr_t) a2 - kc);
      a3 = (const bfloat16_t*) ((uintptr_t) a3 - kc);
      a4 = (const bfloat16_t*) ((uintptr_t) a4 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_bf16(vout1x0123), 0); c1 += 2;
        vst1_lane_u32((void*) c2, vreinterpret_u32_bf16(vout2x0123), 0); c2 += 2;
        vst1_lane_u32((void*) c3, vreinterpret_u32_bf16(vout3x0123), 0); c3 += 2;
        vst1_lane_u32((void*) c4, vreinterpret_u32_bf16(vout4x0123), 0); c4 += 2;

        vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
        vout1x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout1x0123), vreinterpret_u16_bf16(vout1x0123), 2));
        vout2x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout2x0123), vreinterpret_u16_bf16(vout2x0123), 2));
        vout3x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout3x0123), vreinterpret_u16_bf16(vout3x0123), 2));
        vout4x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout4x0123), vreinterpret_u16_bf16(vout4x0123), 2));
      }
      if (nc & 1) {
        vst1_lane_bf16(c0, vout0x0123, 0);
        vst1_lane_bf16(c1, vout1x0123, 0);
        vst1_lane_bf16(c2, vout2x0123, 0);
        vst1_lane_bf16(c3, vout3x0123, 0);
        vst1_lane_bf16(c4, vout4x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
file_length: 14,729 | avg_line_length: 49.273038 | max_line_length: 126 | extension_type: c
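One detail worth calling out in the remainder path above (the k != 0 block): the kernel still loads full 8-element vectors even though fewer than 8 bf16 values remain in the row. It can do this safely because the packed weights are zero-padded to a full group of 8, so it builds a mask of the zero weight lanes (vceqq_u16 against 0) and clears the matching activation lanes with vbicq_u16; the overread activation lanes then contribute exactly 0.0f to the dot product. Below is a minimal, hypothetical scalar model of that trick, reusing the bf16_to_f32 helper from the earlier sketch; it is an illustration, not XNNPACK code.

/* Hypothetical scalar model of the masked remainder accumulation. */
static float masked_tail_dot(const uint16_t a_overread[8], const uint16_t b_padded[8]) {
  float acc = 0.0f;
  for (int i = 0; i < 8; i++) {
    /* Zero the activation lane wherever the zero-padded weight lane is
       zero, so lanes past the valid tail contribute 0.0f * 0.0f. */
    const uint16_t a_bits = (b_padded[i] == 0) ? 0 : a_overread[i];
    acc += bf16_to_f32(a_bits) * bf16_to_f32(b_padded[i]);
  }
  return acc;
}

Note that this also zeroes activations against genuinely zero weights inside the valid range, which leaves the product unchanged; it additionally keeps a garbage NaN or Inf in an overread activation lane from poisoning the accumulator.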
XNNPACK
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-5x4c8-minmax-neonbf16-bfmlal.c
// Auto-generated file. Do not edit!
//   Template: src/bf16-gemm/c8-neonbf16.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>


void xnn_bf16_gemm_minmax_ukernel_5x4c8__neonbf16_bfmlal(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w_ptr,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(bfloat16_t) == 0);
  assert(a != NULL);
  assert(w_ptr != NULL);
  assert(c != NULL);

  const bfloat16_t* a0 = (const bfloat16_t*) a;
  bfloat16_t* c0 = (bfloat16_t*) c;
  const bfloat16_t* a1 = (const bfloat16_t*) ((uintptr_t) a0 + a_stride);
  bfloat16_t* c1 = (bfloat16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const bfloat16_t* a2 = (const bfloat16_t*) ((uintptr_t) a1 + a_stride);
  bfloat16_t* c2 = (bfloat16_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const bfloat16_t* a3 = (const bfloat16_t*) ((uintptr_t) a2 + a_stride);
  bfloat16_t* c3 = (bfloat16_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const bfloat16_t* a4 = (const bfloat16_t*) ((uintptr_t) a3 + a_stride);
  bfloat16_t* c4 = (bfloat16_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  const bfloat16_t* w = (const bfloat16_t*) w_ptr;
  do {
    float32x4_t vacc0x0 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x1 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x2 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x3 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc1x0 = vacc0x0;
    float32x4_t vacc1x1 = vacc0x1;
    float32x4_t vacc1x2 = vacc0x2;
    float32x4_t vacc1x3 = vacc0x3;
    float32x4_t vacc2x0 = vacc0x0;
    float32x4_t vacc2x1 = vacc0x1;
    float32x4_t vacc2x2 = vacc0x2;
    float32x4_t vacc2x3 = vacc0x3;
    float32x4_t vacc3x0 = vacc0x0;
    float32x4_t vacc3x1 = vacc0x1;
    float32x4_t vacc3x2 = vacc0x2;
    float32x4_t vacc3x3 = vacc0x3;
    float32x4_t vacc4x0 = vacc0x0;
    float32x4_t vacc4x1 = vacc0x1;
    float32x4_t vacc4x2 = vacc0x2;
    float32x4_t vacc4x3 = vacc0x3;

    size_t k = kc;
    for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
      const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
      const bfloat16x8_t va1 = vld1q_bf16(a1); a1 += 8;
      const bfloat16x8_t va2 = vld1q_bf16(a2); a2 += 8;
      const bfloat16x8_t va3 = vld1q_bf16(a3); a3 += 8;
      const bfloat16x8_t va4 = vld1q_bf16(a4); a4 += 8;

      const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;

      vacc0x0 = vbfmlalbq_f32(vacc0x0, va0, vb0);
      vacc1x0 = vbfmlalbq_f32(vacc1x0, va1, vb0);
      vacc2x0 = vbfmlalbq_f32(vacc2x0, va2, vb0);
      vacc3x0 = vbfmlalbq_f32(vacc3x0, va3, vb0);
      vacc4x0 = vbfmlalbq_f32(vacc4x0, va4, vb0);
      vacc0x1 = vbfmlalbq_f32(vacc0x1, va0, vb1);
      vacc1x1 = vbfmlalbq_f32(vacc1x1, va1, vb1);
      vacc2x1 = vbfmlalbq_f32(vacc2x1, va2, vb1);
      vacc3x1 = vbfmlalbq_f32(vacc3x1, va3, vb1);
      vacc4x1 = vbfmlalbq_f32(vacc4x1, va4, vb1);
      vacc0x2 = vbfmlalbq_f32(vacc0x2, va0, vb2);
      vacc1x2 = vbfmlalbq_f32(vacc1x2, va1, vb2);
      vacc2x2 = vbfmlalbq_f32(vacc2x2, va2, vb2);
      vacc3x2 = vbfmlalbq_f32(vacc3x2, va3, vb2);
      vacc4x2 = vbfmlalbq_f32(vacc4x2, va4, vb2);
      vacc0x3 = vbfmlalbq_f32(vacc0x3, va0, vb3);
      vacc1x3 = vbfmlalbq_f32(vacc1x3, va1, vb3);
      vacc2x3 = vbfmlalbq_f32(vacc2x3, va2, vb3);
      vacc3x3 = vbfmlalbq_f32(vacc3x3, va3, vb3);
      vacc4x3 = vbfmlalbq_f32(vacc4x3, va4, vb3);
      vacc0x0 = vbfmlaltq_f32(vacc0x0, va0, vb0);
      vacc1x0 = vbfmlaltq_f32(vacc1x0, va1, vb0);
      vacc2x0 = vbfmlaltq_f32(vacc2x0, va2, vb0);
      vacc3x0 = vbfmlaltq_f32(vacc3x0, va3, vb0);
      vacc4x0 = vbfmlaltq_f32(vacc4x0, va4, vb0);
      vacc0x1 = vbfmlaltq_f32(vacc0x1, va0, vb1);
      vacc1x1 = vbfmlaltq_f32(vacc1x1, va1, vb1);
      vacc2x1 = vbfmlaltq_f32(vacc2x1, va2, vb1);
      vacc3x1 = vbfmlaltq_f32(vacc3x1, va3, vb1);
      vacc4x1 = vbfmlaltq_f32(vacc4x1, va4, vb1);
      vacc0x2 = vbfmlaltq_f32(vacc0x2, va0, vb2);
      vacc1x2 = vbfmlaltq_f32(vacc1x2, va1, vb2);
      vacc2x2 = vbfmlaltq_f32(vacc2x2, va2, vb2);
      vacc3x2 = vbfmlaltq_f32(vacc3x2, va3, vb2);
      vacc4x2 = vbfmlaltq_f32(vacc4x2, va4, vb2);
      vacc0x3 = vbfmlaltq_f32(vacc0x3, va0, vb3);
      vacc1x3 = vbfmlaltq_f32(vacc1x3, va1, vb3);
      vacc2x3 = vbfmlaltq_f32(vacc2x3, va2, vb3);
      vacc3x3 = vbfmlaltq_f32(vacc3x3, va3, vb3);
      vacc4x3 = vbfmlaltq_f32(vacc4x3, va4, vb3);
    }
    if XNN_UNLIKELY(k != 0) {
      const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
      const bfloat16x8_t va1 = vld1q_bf16(a1); a1 = (const bfloat16_t*) ((uintptr_t) a1 + k);
      const bfloat16x8_t va2 = vld1q_bf16(a2); a2 = (const bfloat16_t*) ((uintptr_t) a2 + k);
      const bfloat16x8_t va3 = vld1q_bf16(a3); a3 = (const bfloat16_t*) ((uintptr_t) a3 + k);
      const bfloat16x8_t va4 = vld1q_bf16(a4); a4 = (const bfloat16_t*) ((uintptr_t) a4 + k);

      const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;

      const uint16x8_t vm0 = vceqq_u16(vreinterpretq_u16_bf16(vb0), vmovq_n_u16(0));
      const uint16x8_t vm1 = vceqq_u16(vreinterpretq_u16_bf16(vb1), vmovq_n_u16(0));
      const uint16x8_t vm2 = vceqq_u16(vreinterpretq_u16_bf16(vb2), vmovq_n_u16(0));
      const uint16x8_t vm3 = vceqq_u16(vreinterpretq_u16_bf16(vb3), vmovq_n_u16(0));

      const bfloat16x8_t va0x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm0));
      vacc0x0 = vbfmlalbq_f32(vacc0x0, va0x0, vb0);
      vacc0x0 = vbfmlaltq_f32(vacc0x0, va0x0, vb0);
      const bfloat16x8_t va1x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm0));
      vacc1x0 = vbfmlalbq_f32(vacc1x0, va1x0, vb0);
      vacc1x0 = vbfmlaltq_f32(vacc1x0, va1x0, vb0);
      const bfloat16x8_t va2x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm0));
      vacc2x0 = vbfmlalbq_f32(vacc2x0, va2x0, vb0);
      vacc2x0 = vbfmlaltq_f32(vacc2x0, va2x0, vb0);
      const bfloat16x8_t va3x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm0));
      vacc3x0 = vbfmlalbq_f32(vacc3x0, va3x0, vb0);
      vacc3x0 = vbfmlaltq_f32(vacc3x0, va3x0, vb0);
      const bfloat16x8_t va4x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va4), vm0));
      vacc4x0 = vbfmlalbq_f32(vacc4x0, va4x0, vb0);
      vacc4x0 = vbfmlaltq_f32(vacc4x0, va4x0, vb0);
      const bfloat16x8_t va0x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm1));
      vacc0x1 = vbfmlalbq_f32(vacc0x1, va0x1, vb1);
      vacc0x1 = vbfmlaltq_f32(vacc0x1, va0x1, vb1);
      const bfloat16x8_t va1x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm1));
      vacc1x1 = vbfmlalbq_f32(vacc1x1, va1x1, vb1);
      vacc1x1 = vbfmlaltq_f32(vacc1x1, va1x1, vb1);
      const bfloat16x8_t va2x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm1));
      vacc2x1 = vbfmlalbq_f32(vacc2x1, va2x1, vb1);
      vacc2x1 = vbfmlaltq_f32(vacc2x1, va2x1, vb1);
      const bfloat16x8_t va3x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm1));
      vacc3x1 = vbfmlalbq_f32(vacc3x1, va3x1, vb1);
      vacc3x1 = vbfmlaltq_f32(vacc3x1, va3x1, vb1);
      const bfloat16x8_t va4x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va4), vm1));
      vacc4x1 = vbfmlalbq_f32(vacc4x1, va4x1, vb1);
      vacc4x1 = vbfmlaltq_f32(vacc4x1, va4x1, vb1);
      const bfloat16x8_t va0x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm2));
      vacc0x2 = vbfmlalbq_f32(vacc0x2, va0x2, vb2);
      vacc0x2 = vbfmlaltq_f32(vacc0x2, va0x2, vb2);
      const bfloat16x8_t va1x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm2));
      vacc1x2 = vbfmlalbq_f32(vacc1x2, va1x2, vb2);
      vacc1x2 = vbfmlaltq_f32(vacc1x2, va1x2, vb2);
      const bfloat16x8_t va2x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm2));
      vacc2x2 = vbfmlalbq_f32(vacc2x2, va2x2, vb2);
      vacc2x2 = vbfmlaltq_f32(vacc2x2, va2x2, vb2);
      const bfloat16x8_t va3x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm2));
      vacc3x2 = vbfmlalbq_f32(vacc3x2, va3x2, vb2);
      vacc3x2 = vbfmlaltq_f32(vacc3x2, va3x2, vb2);
      const bfloat16x8_t va4x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va4), vm2));
      vacc4x2 = vbfmlalbq_f32(vacc4x2, va4x2, vb2);
      vacc4x2 = vbfmlaltq_f32(vacc4x2, va4x2, vb2);
      const bfloat16x8_t va0x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm3));
      vacc0x3 = vbfmlalbq_f32(vacc0x3, va0x3, vb3);
      vacc0x3 = vbfmlaltq_f32(vacc0x3, va0x3, vb3);
      const bfloat16x8_t va1x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm3));
      vacc1x3 = vbfmlalbq_f32(vacc1x3, va1x3, vb3);
      vacc1x3 = vbfmlaltq_f32(vacc1x3, va1x3, vb3);
      const bfloat16x8_t va2x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm3));
      vacc2x3 = vbfmlalbq_f32(vacc2x3, va2x3, vb3);
      vacc2x3 = vbfmlaltq_f32(vacc2x3, va2x3, vb3);
      const bfloat16x8_t va3x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm3));
      vacc3x3 = vbfmlalbq_f32(vacc3x3, va3x3, vb3);
      vacc3x3 = vbfmlaltq_f32(vacc3x3, va3x3, vb3);
      const bfloat16x8_t va4x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va4), vm3));
      vacc4x3 = vbfmlalbq_f32(vacc4x3, va4x3, vb3);
      vacc4x3 = vbfmlaltq_f32(vacc4x3, va4x3, vb3);
    }

#if XNN_ARCH_ARM64
    const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
    const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
    const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
    const float32x4_t vacc3x01 = vpaddq_f32(vacc3x0, vacc3x1);
    const float32x4_t vacc4x01 = vpaddq_f32(vacc4x0, vacc4x1);
    const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
    const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
    const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);
    const float32x4_t vacc3x23 = vpaddq_f32(vacc3x2, vacc3x3);
    const float32x4_t vacc4x23 = vpaddq_f32(vacc4x2, vacc4x3);

    float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
    float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
    float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
    float32x4_t vacc3x0123 = vpaddq_f32(vacc3x01, vacc3x23);
    float32x4_t vacc4x0123 = vpaddq_f32(vacc4x01, vacc4x23);
#else
    const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
    const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
    const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
    const float32x2_t vsum3x0 = vadd_f32(vget_low_f32(vacc3x0), vget_high_f32(vacc3x0));
    const float32x2_t vsum4x0 = vadd_f32(vget_low_f32(vacc4x0), vget_high_f32(vacc4x0));
    const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
    const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
    const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
    const float32x2_t vsum3x1 = vadd_f32(vget_low_f32(vacc3x1), vget_high_f32(vacc3x1));
    const float32x2_t vsum4x1 = vadd_f32(vget_low_f32(vacc4x1), vget_high_f32(vacc4x1));
    const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
    const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
    const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
    const float32x2_t vsum3x2 = vadd_f32(vget_low_f32(vacc3x2), vget_high_f32(vacc3x2));
    const float32x2_t vsum4x2 = vadd_f32(vget_low_f32(vacc4x2), vget_high_f32(vacc4x2));
    const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
    const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
    const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));
    const float32x2_t vsum3x3 = vadd_f32(vget_low_f32(vacc3x3), vget_high_f32(vacc3x3));
    const float32x2_t vsum4x3 = vadd_f32(vget_low_f32(vacc4x3), vget_high_f32(vacc4x3));

    float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
    float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
    float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
    float32x4_t vacc3x0123 = vcombine_f32(vpadd_f32(vsum3x0, vsum3x1), vpadd_f32(vsum3x2, vsum3x3));
    float32x4_t vacc4x0123 = vcombine_f32(vpadd_f32(vsum4x0, vsum4x1), vpadd_f32(vsum4x2, vsum4x3));
#endif

    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc4x0123 = vminq_f32(vacc4x0123, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);

    bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
    bfloat16x4_t vout1x0123 = vcvt_bf16_f32(vacc1x0123);
    bfloat16x4_t vout2x0123 = vcvt_bf16_f32(vacc2x0123);
    bfloat16x4_t vout3x0123 = vcvt_bf16_f32(vacc3x0123);
    bfloat16x4_t vout4x0123 = vcvt_bf16_f32(vacc4x0123);

    if XNN_LIKELY(nc >= 4) {
      vst1_bf16(c0, vout0x0123);
      c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
      vst1_bf16(c1, vout1x0123);
      c1 = (bfloat16_t*) ((uintptr_t) c1 + cn_stride);
      vst1_bf16(c2, vout2x0123);
      c2 = (bfloat16_t*) ((uintptr_t) c2 + cn_stride);
      vst1_bf16(c3, vout3x0123);
      c3 = (bfloat16_t*) ((uintptr_t) c3 + cn_stride);
      vst1_bf16(c4, vout4x0123);
      c4 = (bfloat16_t*) ((uintptr_t) c4 + cn_stride);

      a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
      a1 = (const bfloat16_t*) ((uintptr_t) a1 - kc);
      a2 = (const bfloat16_t*) ((uintptr_t) a2 - kc);
      a3 = (const bfloat16_t*) ((uintptr_t) a3 - kc);
      a4 = (const bfloat16_t*) ((uintptr_t) a4 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_bf16(vout1x0123), 0); c1 += 2;
        vst1_lane_u32((void*) c2, vreinterpret_u32_bf16(vout2x0123), 0); c2 += 2;
        vst1_lane_u32((void*) c3, vreinterpret_u32_bf16(vout3x0123), 0); c3 += 2;
        vst1_lane_u32((void*) c4, vreinterpret_u32_bf16(vout4x0123), 0); c4 += 2;

        vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
        vout1x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout1x0123), vreinterpret_u16_bf16(vout1x0123), 2));
        vout2x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout2x0123), vreinterpret_u16_bf16(vout2x0123), 2));
        vout3x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout3x0123), vreinterpret_u16_bf16(vout3x0123), 2));
        vout4x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout4x0123), vreinterpret_u16_bf16(vout4x0123), 2));
      }
      if (nc & 1) {
        vst1_lane_bf16(c0, vout0x0123, 0);
        vst1_lane_bf16(c1, vout1x0123, 0);
        vst1_lane_bf16(c2, vout2x0123, 0);
        vst1_lane_bf16(c3, vout3x0123, 0);
        vst1_lane_bf16(c4, vout4x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
file_length: 16,851 | avg_line_length: 49.45509 | max_line_length: 126 | extension_type: c
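The bfmlal variant above replaces each BFDOT with a BFMLALB/BFMLALT pair: the "bottom" form multiplies the even-indexed bf16 lanes into the f32 accumulator and the "top" form the odd-indexed lanes, which is why every accumulator is updated twice per weight column. Here is a hypothetical scalar model of one lane pair, again using the bf16_to_f32 helper from the first sketch; it illustrates the lane split, not the instructions' exact rounding.

/* Hypothetical scalar model of one f32 lane under the
   vbfmlalbq_f32 / vbfmlaltq_f32 pair. */
static float bfmlal_pair(float acc, const uint16_t a[2], const uint16_t b[2]) {
  acc += bf16_to_f32(a[0]) * bf16_to_f32(b[0]);  /* BFMLALB: even (bottom) lanes */
  acc += bf16_to_f32(a[1]) * bf16_to_f32(b[1]);  /* BFMLALT: odd (top) lanes */
  return acc;
}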
XNNPACK
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-5x4c8-minmax-neonfma-shland.c
// Auto-generated file. Do not edit!
//   Template: src/bf16-gemm/c8-neon-shland.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>


void xnn_bf16_gemm_minmax_ukernel_5x4c8__neonfma_shland(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w_ptr,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint16_t) == 0);
  assert(a != NULL);
  assert(w_ptr != NULL);
  assert(c != NULL);

  const uint16_t* a0 = (const uint16_t*) a;
  uint16_t* c0 = (uint16_t*) c;
  const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
  uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
  uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
  uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
  uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  const uint16_t* w = (const uint16_t*) w_ptr;
  const uint16x8_t vmask = vreinterpretq_u16_u32(vmovq_n_u32(UINT32_C(0xFFFF0000)));
  do {
    float32x4_t vacc0x0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
    float32x4_t vacc0x1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
    float32x4_t vacc0x2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
    float32x4_t vacc0x3 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
    float32x4_t vacc1x0 = vacc0x0;
    float32x4_t vacc1x1 = vacc0x1;
    float32x4_t vacc1x2 = vacc0x2;
    float32x4_t vacc1x3 = vacc0x3;
    float32x4_t vacc2x0 = vacc0x0;
    float32x4_t vacc2x1 = vacc0x1;
    float32x4_t vacc2x2 = vacc0x2;
    float32x4_t vacc2x3 = vacc0x3;
    float32x4_t vacc3x0 = vacc0x0;
    float32x4_t vacc3x1 = vacc0x1;
    float32x4_t vacc3x2 = vacc0x2;
    float32x4_t vacc3x3 = vacc0x3;
    float32x4_t vacc4x0 = vacc0x0;
    float32x4_t vacc4x1 = vacc0x1;
    float32x4_t vacc4x2 = vacc0x2;
    float32x4_t vacc4x3 = vacc0x3;

    size_t k = kc;
    for (; k >= 8 * sizeof(uint16_t); k -= 8 * sizeof(uint16_t)) {
      const uint16x8_t va0 = vld1q_u16(a0); a0 += 8;
      const uint16x8_t va1 = vld1q_u16(a1); a1 += 8;
      const uint16x8_t va2 = vld1q_u16(a2); a2 += 8;
      const uint16x8_t va3 = vld1q_u16(a3); a3 += 8;
      const uint16x8_t va4 = vld1q_u16(a4); a4 += 8;

      const uint16x8_t vb0 = vld1q_u16(w); w += 8;
      const uint16x8_t vb1 = vld1q_u16(w); w += 8;
      const uint16x8_t vb2 = vld1q_u16(w); w += 8;
      const uint16x8_t vb3 = vld1q_u16(w); w += 8;

      const float32x4_t va0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0), 16));
      const float32x4_t va1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1), 16));
      const float32x4_t va2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2), 16));
      const float32x4_t va3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va3), 16));
      const float32x4_t va4e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va4), 16));
      const float32x4_t vb0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb0), 16));
      const float32x4_t vb1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb1), 16));
      const float32x4_t vb2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb2), 16));
      const float32x4_t vb3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb3), 16));

      vacc0x0 = vfmaq_f32(vacc0x0, va0e, vb0e);
      vacc1x0 = vfmaq_f32(vacc1x0, va1e, vb0e);
      vacc2x0 = vfmaq_f32(vacc2x0, va2e, vb0e);
      vacc3x0 = vfmaq_f32(vacc3x0, va3e, vb0e);
      vacc4x0 = vfmaq_f32(vacc4x0, va4e, vb0e);
      vacc0x1 = vfmaq_f32(vacc0x1, va0e, vb1e);
      vacc1x1 = vfmaq_f32(vacc1x1, va1e, vb1e);
      vacc2x1 = vfmaq_f32(vacc2x1, va2e, vb1e);
      vacc3x1 = vfmaq_f32(vacc3x1, va3e, vb1e);
      vacc4x1 = vfmaq_f32(vacc4x1, va4e, vb1e);
      vacc0x2 = vfmaq_f32(vacc0x2, va0e, vb2e);
      vacc1x2 = vfmaq_f32(vacc1x2, va1e, vb2e);
      vacc2x2 = vfmaq_f32(vacc2x2, va2e, vb2e);
      vacc3x2 = vfmaq_f32(vacc3x2, va3e, vb2e);
      vacc4x2 = vfmaq_f32(vacc4x2, va4e, vb2e);
      vacc0x3 = vfmaq_f32(vacc0x3, va0e, vb3e);
      vacc1x3 = vfmaq_f32(vacc1x3, va1e, vb3e);
      vacc2x3 = vfmaq_f32(vacc2x3, va2e, vb3e);
      vacc3x3 = vfmaq_f32(vacc3x3, va3e, vb3e);
      vacc4x3 = vfmaq_f32(vacc4x3, va4e, vb3e);

      const float32x4_t va0o = vreinterpretq_f32_u16(vandq_u16(va0, vmask));
      const float32x4_t va1o = vreinterpretq_f32_u16(vandq_u16(va1, vmask));
      const float32x4_t va2o = vreinterpretq_f32_u16(vandq_u16(va2, vmask));
      const float32x4_t va3o = vreinterpretq_f32_u16(vandq_u16(va3, vmask));
      const float32x4_t va4o = vreinterpretq_f32_u16(vandq_u16(va4, vmask));
      const float32x4_t vb0o = vreinterpretq_f32_u16(vandq_u16(vb0, vmask));
      const float32x4_t vb1o = vreinterpretq_f32_u16(vandq_u16(vb1, vmask));
      const float32x4_t vb2o = vreinterpretq_f32_u16(vandq_u16(vb2, vmask));
      const float32x4_t vb3o = vreinterpretq_f32_u16(vandq_u16(vb3, vmask));

      vacc0x0 = vfmaq_f32(vacc0x0, va0o, vb0o);
      vacc1x0 = vfmaq_f32(vacc1x0, va1o, vb0o);
      vacc2x0 = vfmaq_f32(vacc2x0, va2o, vb0o);
      vacc3x0 = vfmaq_f32(vacc3x0, va3o, vb0o);
      vacc4x0 = vfmaq_f32(vacc4x0, va4o, vb0o);
      vacc0x1 = vfmaq_f32(vacc0x1, va0o, vb1o);
      vacc1x1 = vfmaq_f32(vacc1x1, va1o, vb1o);
      vacc2x1 = vfmaq_f32(vacc2x1, va2o, vb1o);
      vacc3x1 = vfmaq_f32(vacc3x1, va3o, vb1o);
      vacc4x1 = vfmaq_f32(vacc4x1, va4o, vb1o);
      vacc0x2 = vfmaq_f32(vacc0x2, va0o, vb2o);
      vacc1x2 = vfmaq_f32(vacc1x2, va1o, vb2o);
      vacc2x2 = vfmaq_f32(vacc2x2, va2o, vb2o);
      vacc3x2 = vfmaq_f32(vacc3x2, va3o, vb2o);
      vacc4x2 = vfmaq_f32(vacc4x2, va4o, vb2o);
      vacc0x3 = vfmaq_f32(vacc0x3, va0o, vb3o);
      vacc1x3 = vfmaq_f32(vacc1x3, va1o, vb3o);
      vacc2x3 = vfmaq_f32(vacc2x3, va2o, vb3o);
      vacc3x3 = vfmaq_f32(vacc3x3, va3o, vb3o);
      vacc4x3 = vfmaq_f32(vacc4x3, va4o, vb3o);
    }
    if XNN_UNLIKELY(k != 0) {
      const uint16x8_t va0 = vld1q_u16(a0); a0 = (const uint16_t*) ((uintptr_t) a0 + k);
      const uint16x8_t va1 = vld1q_u16(a1); a1 = (const uint16_t*) ((uintptr_t) a1 + k);
      const uint16x8_t va2 = vld1q_u16(a2); a2 = (const uint16_t*) ((uintptr_t) a2 + k);
      const uint16x8_t va3 = vld1q_u16(a3); a3 = (const uint16_t*) ((uintptr_t) a3 + k);
      const uint16x8_t va4 = vld1q_u16(a4); a4 = (const uint16_t*) ((uintptr_t) a4 + k);

      const uint16x8_t vb0 = vld1q_u16(w); w += 8;
      const uint16x8_t vb1 = vld1q_u16(w); w += 8;
      const uint16x8_t vb2 = vld1q_u16(w); w += 8;
      const uint16x8_t vb3 = vld1q_u16(w); w += 8;

      const uint16x8_t vm0 = vceqq_u16(vb0, vmovq_n_u16(0));
      const uint16x8_t vm1 = vceqq_u16(vb1, vmovq_n_u16(0));
      const uint16x8_t vm2 = vceqq_u16(vb2, vmovq_n_u16(0));
      const uint16x8_t vm3 = vceqq_u16(vb3, vmovq_n_u16(0));

      const float32x4_t vb0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb0), 16));
      const float32x4_t vb1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb1), 16));
      const float32x4_t vb2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb2), 16));
      const float32x4_t vb3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb3), 16));

      const uint16x8_t va0x0 = vbicq_u16(va0, vm0);
      const uint16x8_t va1x0 = vbicq_u16(va1, vm0);
      const uint16x8_t va2x0 = vbicq_u16(va2, vm0);
      const uint16x8_t va3x0 = vbicq_u16(va3, vm0);
      const uint16x8_t va4x0 = vbicq_u16(va4, vm0);
      const uint16x8_t va0x1 = vbicq_u16(va0, vm1);
      const uint16x8_t va1x1 = vbicq_u16(va1, vm1);
      const uint16x8_t va2x1 = vbicq_u16(va2, vm1);
      const uint16x8_t va3x1 = vbicq_u16(va3, vm1);
      const uint16x8_t va4x1 = vbicq_u16(va4, vm1);
      const uint16x8_t va0x2 = vbicq_u16(va0, vm2);
      const uint16x8_t va1x2 = vbicq_u16(va1, vm2);
      const uint16x8_t va2x2 = vbicq_u16(va2, vm2);
      const uint16x8_t va3x2 = vbicq_u16(va3, vm2);
      const uint16x8_t va4x2 = vbicq_u16(va4, vm2);
      const uint16x8_t va0x3 = vbicq_u16(va0, vm3);
      const uint16x8_t va1x3 = vbicq_u16(va1, vm3);
      const uint16x8_t va2x3 = vbicq_u16(va2, vm3);
      const uint16x8_t va3x3 = vbicq_u16(va3, vm3);
      const uint16x8_t va4x3 = vbicq_u16(va4, vm3);

      const float32x4_t va0x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x0), 16));
      const float32x4_t va1x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x0), 16));
      const float32x4_t va2x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2x0), 16));
      const float32x4_t va3x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va3x0), 16));
      const float32x4_t va4x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va4x0), 16));
      const float32x4_t va0x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x1), 16));
      const float32x4_t va1x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x1), 16));
      const float32x4_t va2x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2x1), 16));
      const float32x4_t va3x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va3x1), 16));
      const float32x4_t va4x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va4x1), 16));
      const float32x4_t va0x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x2), 16));
      const float32x4_t va1x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x2), 16));
      const float32x4_t va2x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2x2), 16));
      const float32x4_t va3x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va3x2), 16));
      const float32x4_t va4x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va4x2), 16));
      const float32x4_t va0x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x3), 16));
      const float32x4_t va1x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x3), 16));
      const float32x4_t va2x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2x3), 16));
      const float32x4_t va3x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va3x3), 16));
      const float32x4_t va4x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va4x3), 16));

      vacc0x0 = vfmaq_f32(vacc0x0, va0x0e, vb0e);
      vacc1x0 = vfmaq_f32(vacc1x0, va1x0e, vb0e);
      vacc2x0 = vfmaq_f32(vacc2x0, va2x0e, vb0e);
      vacc3x0 = vfmaq_f32(vacc3x0, va3x0e, vb0e);
      vacc4x0 = vfmaq_f32(vacc4x0, va4x0e, vb0e);
      vacc0x1 = vfmaq_f32(vacc0x1, va0x1e, vb1e);
      vacc1x1 = vfmaq_f32(vacc1x1, va1x1e, vb1e);
      vacc2x1 = vfmaq_f32(vacc2x1, va2x1e, vb1e);
      vacc3x1 = vfmaq_f32(vacc3x1, va3x1e, vb1e);
      vacc4x1 = vfmaq_f32(vacc4x1, va4x1e, vb1e);
      vacc0x2 = vfmaq_f32(vacc0x2, va0x2e, vb2e);
      vacc1x2 = vfmaq_f32(vacc1x2, va1x2e, vb2e);
      vacc2x2 = vfmaq_f32(vacc2x2, va2x2e, vb2e);
      vacc3x2 = vfmaq_f32(vacc3x2, va3x2e, vb2e);
      vacc4x2 = vfmaq_f32(vacc4x2, va4x2e, vb2e);
      vacc0x3 = vfmaq_f32(vacc0x3, va0x3e, vb3e);
      vacc1x3 = vfmaq_f32(vacc1x3, va1x3e, vb3e);
      vacc2x3 = vfmaq_f32(vacc2x3, va2x3e, vb3e);
      vacc3x3 = vfmaq_f32(vacc3x3, va3x3e, vb3e);
      vacc4x3 = vfmaq_f32(vacc4x3, va4x3e, vb3e);

      const float32x4_t vb0o = vreinterpretq_f32_u16(vandq_u16(vb0, vmask));
      const float32x4_t vb1o = vreinterpretq_f32_u16(vandq_u16(vb1, vmask));
      const float32x4_t vb2o = vreinterpretq_f32_u16(vandq_u16(vb2, vmask));
      const float32x4_t vb3o = vreinterpretq_f32_u16(vandq_u16(vb3, vmask));

      const float32x4_t va0x0o = vreinterpretq_f32_u16(vandq_u16(va0x0, vmask));
      const float32x4_t va1x0o = vreinterpretq_f32_u16(vandq_u16(va1x0, vmask));
      const float32x4_t va2x0o = vreinterpretq_f32_u16(vandq_u16(va2x0, vmask));
      const float32x4_t va3x0o = vreinterpretq_f32_u16(vandq_u16(va3x0, vmask));
      const float32x4_t va4x0o = vreinterpretq_f32_u16(vandq_u16(va4x0, vmask));
      const float32x4_t va0x1o = vreinterpretq_f32_u16(vandq_u16(va0x1, vmask));
      const float32x4_t va1x1o = vreinterpretq_f32_u16(vandq_u16(va1x1, vmask));
      const float32x4_t va2x1o = vreinterpretq_f32_u16(vandq_u16(va2x1, vmask));
      const float32x4_t va3x1o = vreinterpretq_f32_u16(vandq_u16(va3x1, vmask));
      const float32x4_t va4x1o = vreinterpretq_f32_u16(vandq_u16(va4x1, vmask));
      const float32x4_t va0x2o = vreinterpretq_f32_u16(vandq_u16(va0x2, vmask));
      const float32x4_t va1x2o = vreinterpretq_f32_u16(vandq_u16(va1x2, vmask));
      const float32x4_t va2x2o = vreinterpretq_f32_u16(vandq_u16(va2x2, vmask));
      const float32x4_t va3x2o = vreinterpretq_f32_u16(vandq_u16(va3x2, vmask));
      const float32x4_t va4x2o = vreinterpretq_f32_u16(vandq_u16(va4x2, vmask));
      const float32x4_t va0x3o = vreinterpretq_f32_u16(vandq_u16(va0x3, vmask));
      const float32x4_t va1x3o = vreinterpretq_f32_u16(vandq_u16(va1x3, vmask));
      const float32x4_t va2x3o = vreinterpretq_f32_u16(vandq_u16(va2x3, vmask));
      const float32x4_t va3x3o = vreinterpretq_f32_u16(vandq_u16(va3x3, vmask));
      const float32x4_t va4x3o = vreinterpretq_f32_u16(vandq_u16(va4x3, vmask));

      vacc0x0 = vfmaq_f32(vacc0x0, va0x0o, vb0o);
      vacc1x0 = vfmaq_f32(vacc1x0, va1x0o, vb0o);
      vacc2x0 = vfmaq_f32(vacc2x0, va2x0o, vb0o);
      vacc3x0 = vfmaq_f32(vacc3x0, va3x0o, vb0o);
      vacc4x0 = vfmaq_f32(vacc4x0, va4x0o, vb0o);
      vacc0x1 = vfmaq_f32(vacc0x1, va0x1o, vb1o);
      vacc1x1 = vfmaq_f32(vacc1x1, va1x1o, vb1o);
      vacc2x1 = vfmaq_f32(vacc2x1, va2x1o, vb1o);
      vacc3x1 = vfmaq_f32(vacc3x1, va3x1o, vb1o);
      vacc4x1 = vfmaq_f32(vacc4x1, va4x1o, vb1o);
      vacc0x2 = vfmaq_f32(vacc0x2, va0x2o, vb2o);
      vacc1x2 = vfmaq_f32(vacc1x2, va1x2o, vb2o);
      vacc2x2 = vfmaq_f32(vacc2x2, va2x2o, vb2o);
      vacc3x2 = vfmaq_f32(vacc3x2, va3x2o, vb2o);
      vacc4x2 = vfmaq_f32(vacc4x2, va4x2o, vb2o);
      vacc0x3 = vfmaq_f32(vacc0x3, va0x3o, vb3o);
      vacc1x3 = vfmaq_f32(vacc1x3, va1x3o, vb3o);
      vacc2x3 = vfmaq_f32(vacc2x3, va2x3o, vb3o);
      vacc3x3 = vfmaq_f32(vacc3x3, va3x3o, vb3o);
      vacc4x3 = vfmaq_f32(vacc4x3, va4x3o, vb3o);
    }

#if XNN_ARCH_ARM64
    const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
    const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
    const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
    const float32x4_t vacc3x01 = vpaddq_f32(vacc3x0, vacc3x1);
    const float32x4_t vacc4x01 = vpaddq_f32(vacc4x0, vacc4x1);
    const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
    const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
    const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);
    const float32x4_t vacc3x23 = vpaddq_f32(vacc3x2, vacc3x3);
    const float32x4_t vacc4x23 = vpaddq_f32(vacc4x2, vacc4x3);

    float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
    float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
    float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
    float32x4_t vacc3x0123 = vpaddq_f32(vacc3x01, vacc3x23);
    float32x4_t vacc4x0123 = vpaddq_f32(vacc4x01, vacc4x23);
#else
    const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
    const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
    const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
    const float32x2_t vsum3x0 = vadd_f32(vget_low_f32(vacc3x0), vget_high_f32(vacc3x0));
    const float32x2_t vsum4x0 = vadd_f32(vget_low_f32(vacc4x0), vget_high_f32(vacc4x0));
    const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
    const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
    const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
    const float32x2_t vsum3x1 = vadd_f32(vget_low_f32(vacc3x1), vget_high_f32(vacc3x1));
    const float32x2_t vsum4x1 = vadd_f32(vget_low_f32(vacc4x1), vget_high_f32(vacc4x1));
    const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
    const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
    const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
    const float32x2_t vsum3x2 = vadd_f32(vget_low_f32(vacc3x2), vget_high_f32(vacc3x2));
    const float32x2_t vsum4x2 = vadd_f32(vget_low_f32(vacc4x2), vget_high_f32(vacc4x2));
    const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
    const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
    const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));
    const float32x2_t vsum3x3 = vadd_f32(vget_low_f32(vacc3x3), vget_high_f32(vacc3x3));
    const float32x2_t vsum4x3 = vadd_f32(vget_low_f32(vacc4x3), vget_high_f32(vacc4x3));

    float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
    float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
    float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
    float32x4_t vacc3x0123 = vcombine_f32(vpadd_f32(vsum3x0, vsum3x1), vpadd_f32(vsum3x2, vsum3x3));
    float32x4_t vacc4x0123 = vcombine_f32(vpadd_f32(vsum4x0, vsum4x1), vpadd_f32(vsum4x2, vsum4x3));
#endif

    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc4x0123 = vminq_f32(vacc4x0123, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);

    uint16x4_t vout0x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc0x0123), 16);
    uint16x4_t vout1x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc1x0123), 16);
    uint16x4_t vout2x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc2x0123), 16);
    uint16x4_t vout3x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc3x0123), 16);
    uint16x4_t vout4x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc4x0123), 16);

    if XNN_LIKELY(nc >= 4) {
      vst1_u16(c0, vout0x0123);
      c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
      vst1_u16(c1, vout1x0123);
      c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
      vst1_u16(c2, vout2x0123);
      c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
      vst1_u16(c3, vout3x0123);
      c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
      vst1_u16(c4, vout4x0123);
      c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);

      a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
      a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
      a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
      a4 = (const uint16_t*) ((uintptr_t) a4 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_u16(vout0x0123), 0); c0 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_u16(vout1x0123), 0); c1 += 2;
        vst1_lane_u32((void*) c2, vreinterpret_u32_u16(vout2x0123), 0); c2 += 2;
        vst1_lane_u32((void*) c3, vreinterpret_u32_u16(vout3x0123), 0); c3 += 2;
        vst1_lane_u32((void*) c4, vreinterpret_u32_u16(vout4x0123), 0); c4 += 2;

        vout0x0123 = vext_u16(vout0x0123, vout0x0123, 2);
        vout1x0123 = vext_u16(vout1x0123, vout1x0123, 2);
        vout2x0123 = vext_u16(vout2x0123, vout2x0123, 2);
        vout3x0123 = vext_u16(vout3x0123, vout3x0123, 2);
        vout4x0123 = vext_u16(vout4x0123, vout4x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_u16(c0, vout0x0123, 0);
        vst1_lane_u16(c1, vout1x0123, 0);
        vst1_lane_u16(c2, vout2x0123, 0);
        vst1_lane_u16(c3, vout3x0123, 0);
        vst1_lane_u16(c4, vout4x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
21285
50.790754
109
c
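The kernel whose tail appears above does all of its arithmetic in float32 and only stores bfloat16: a bfloat16 value is by definition the high 16 bits of the corresponding IEEE float32, so widening amounts to placing the 16-bit pattern in the upper half of a 32-bit lane, and narrowing is the vshrn_n_u32(vreinterpretq_u32_f32(...), 16) truncation visible just before the stores, which drops the low mantissa bits with no rounding (cheaper than round-to-nearest-even). A minimal scalar sketch of that round trip, assuming nothing beyond <stdint.h> and <string.h>; the helper names are illustrative, not XNNPACK APIs:

#include <stdint.h>
#include <string.h>

// Widen a bfloat16 bit pattern to float32: place it in the high 16 bits
// of a 32-bit word and bit-cast (the scalar analogue of the NEON
// reinterpret casts used throughout the kernel).
static float bf16_to_f32(uint16_t h) {
  const uint32_t bits = (uint32_t) h << 16;  // low 16 mantissa bits become 0
  float f;
  memcpy(&f, &bits, sizeof f);
  return f;
}

// Narrow float32 to bfloat16 by truncation, exactly like the
// vshrn_n_u32(..., 16) before the stores above: the low 16 mantissa bits
// are dropped with no rounding.
static uint16_t f32_to_bf16_trunc(float f) {
  uint32_t bits;
  memcpy(&bits, &f, sizeof bits);
  return (uint16_t) (bits >> 16);
}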
XNNPACK
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-5x4c8-minmax-neonfma-zip.c
// Auto-generated file. Do not edit! // Template: src/bf16-gemm/c8-neon-zip.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> void xnn_bf16_gemm_minmax_ukernel_5x4c8__neonfma_zip( size_t mr, size_t nc, size_t kc, const void* restrict a, size_t a_stride, const void* restrict w_ptr, void* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 5); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(uint16_t) == 0); assert(a != NULL); assert(w_ptr != NULL); assert(c != NULL); const uint16_t* a0 = (const uint16_t*) a; uint16_t* c0 = (uint16_t*) c; const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride); uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride); uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride); uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride); uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const uint16_t* w = (const uint16_t*) w_ptr; const uint16x8_t vzero = vmovq_n_u16(0); do { float32x4_t vacc0x0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1; float32x4_t vacc0x1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1; float32x4_t vacc0x2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1; float32x4_t vacc0x3 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1; float32x4_t vacc1x0 = vacc0x0; float32x4_t vacc1x1 = vacc0x1; float32x4_t vacc1x2 = vacc0x2; float32x4_t vacc1x3 = vacc0x3; float32x4_t vacc2x0 = vacc0x0; float32x4_t vacc2x1 = vacc0x1; float32x4_t vacc2x2 = vacc0x2; float32x4_t vacc2x3 = vacc0x3; float32x4_t vacc3x0 = vacc0x0; float32x4_t vacc3x1 = vacc0x1; float32x4_t vacc3x2 = vacc0x2; float32x4_t vacc3x3 = vacc0x3; float32x4_t vacc4x0 = vacc0x0; float32x4_t vacc4x1 = vacc0x1; float32x4_t vacc4x2 = vacc0x2; float32x4_t vacc4x3 = vacc0x3; size_t k = kc; for (; k >= 8 * sizeof(uint16_t); k -= 8 * sizeof(uint16_t)) { const uint16x8_t va0h = vld1q_u16(a0); a0 += 8; const uint16x8_t va1h = vld1q_u16(a1); a1 += 8; const uint16x8_t va2h = vld1q_u16(a2); a2 += 8; const uint16x8_t va3h = vld1q_u16(a3); a3 += 8; const uint16x8_t va4h = vld1q_u16(a4); a4 += 8; const uint16x8_t vb0h = vld1q_u16(w); w += 8; const uint16x8_t vb1h = vld1q_u16(w); w += 8; const uint16x8_t vb2h = vld1q_u16(w); w += 8; const uint16x8_t vb3h = vld1q_u16(w); w += 8; const uint16x8x2_t va0f = vzipq_u16(vzero, va0h); const uint16x8x2_t va1f = vzipq_u16(vzero, va1h); const uint16x8x2_t va2f = vzipq_u16(vzero, va2h); const uint16x8x2_t va3f = vzipq_u16(vzero, va3h); const uint16x8x2_t va4f = vzipq_u16(vzero, va4h); const uint16x8x2_t vb0f = vzipq_u16(vzero, vb0h); const uint16x8x2_t vb1f = vzipq_u16(vzero, vb1h); const uint16x8x2_t vb2f = vzipq_u16(vzero, vb2h); const uint16x8x2_t vb3f = vzipq_u16(vzero, vb3h); vacc0x0 = 
vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0])); vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb0f.val[0])); vacc2x0 = vfmaq_f32(vacc2x0, vreinterpretq_f32_u16(va2f.val[0]), vreinterpretq_f32_u16(vb0f.val[0])); vacc3x0 = vfmaq_f32(vacc3x0, vreinterpretq_f32_u16(va3f.val[0]), vreinterpretq_f32_u16(vb0f.val[0])); vacc4x0 = vfmaq_f32(vacc4x0, vreinterpretq_f32_u16(va4f.val[0]), vreinterpretq_f32_u16(vb0f.val[0])); vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb1f.val[0])); vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0])); vacc2x1 = vfmaq_f32(vacc2x1, vreinterpretq_f32_u16(va2f.val[0]), vreinterpretq_f32_u16(vb1f.val[0])); vacc3x1 = vfmaq_f32(vacc3x1, vreinterpretq_f32_u16(va3f.val[0]), vreinterpretq_f32_u16(vb1f.val[0])); vacc4x1 = vfmaq_f32(vacc4x1, vreinterpretq_f32_u16(va4f.val[0]), vreinterpretq_f32_u16(vb1f.val[0])); vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb2f.val[0])); vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb2f.val[0])); vacc2x2 = vfmaq_f32(vacc2x2, vreinterpretq_f32_u16(va2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0])); vacc3x2 = vfmaq_f32(vacc3x2, vreinterpretq_f32_u16(va3f.val[0]), vreinterpretq_f32_u16(vb2f.val[0])); vacc4x2 = vfmaq_f32(vacc4x2, vreinterpretq_f32_u16(va4f.val[0]), vreinterpretq_f32_u16(vb2f.val[0])); vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb3f.val[0])); vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb3f.val[0])); vacc2x3 = vfmaq_f32(vacc2x3, vreinterpretq_f32_u16(va2f.val[0]), vreinterpretq_f32_u16(vb3f.val[0])); vacc3x3 = vfmaq_f32(vacc3x3, vreinterpretq_f32_u16(va3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0])); vacc4x3 = vfmaq_f32(vacc4x3, vreinterpretq_f32_u16(va4f.val[0]), vreinterpretq_f32_u16(vb3f.val[0])); vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1])); vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb0f.val[1])); vacc2x0 = vfmaq_f32(vacc2x0, vreinterpretq_f32_u16(va2f.val[1]), vreinterpretq_f32_u16(vb0f.val[1])); vacc3x0 = vfmaq_f32(vacc3x0, vreinterpretq_f32_u16(va3f.val[1]), vreinterpretq_f32_u16(vb0f.val[1])); vacc4x0 = vfmaq_f32(vacc4x0, vreinterpretq_f32_u16(va4f.val[1]), vreinterpretq_f32_u16(vb0f.val[1])); vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb1f.val[1])); vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1])); vacc2x1 = vfmaq_f32(vacc2x1, vreinterpretq_f32_u16(va2f.val[1]), vreinterpretq_f32_u16(vb1f.val[1])); vacc3x1 = vfmaq_f32(vacc3x1, vreinterpretq_f32_u16(va3f.val[1]), vreinterpretq_f32_u16(vb1f.val[1])); vacc4x1 = vfmaq_f32(vacc4x1, vreinterpretq_f32_u16(va4f.val[1]), vreinterpretq_f32_u16(vb1f.val[1])); vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb2f.val[1])); vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb2f.val[1])); vacc2x2 = vfmaq_f32(vacc2x2, vreinterpretq_f32_u16(va2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1])); vacc3x2 = vfmaq_f32(vacc3x2, vreinterpretq_f32_u16(va3f.val[1]), vreinterpretq_f32_u16(vb2f.val[1])); vacc4x2 = vfmaq_f32(vacc4x2, vreinterpretq_f32_u16(va4f.val[1]), 
vreinterpretq_f32_u16(vb2f.val[1])); vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb3f.val[1])); vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb3f.val[1])); vacc2x3 = vfmaq_f32(vacc2x3, vreinterpretq_f32_u16(va2f.val[1]), vreinterpretq_f32_u16(vb3f.val[1])); vacc3x3 = vfmaq_f32(vacc3x3, vreinterpretq_f32_u16(va3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1])); vacc4x3 = vfmaq_f32(vacc4x3, vreinterpretq_f32_u16(va4f.val[1]), vreinterpretq_f32_u16(vb3f.val[1])); } if XNN_UNLIKELY(k != 0) { const uint16x8_t va0h = vld1q_u16(a0); a0 = (const uint16_t*) ((uintptr_t) a0 + k); const uint16x8_t va1h = vld1q_u16(a1); a1 = (const uint16_t*) ((uintptr_t) a1 + k); const uint16x8_t va2h = vld1q_u16(a2); a2 = (const uint16_t*) ((uintptr_t) a2 + k); const uint16x8_t va3h = vld1q_u16(a3); a3 = (const uint16_t*) ((uintptr_t) a3 + k); const uint16x8_t va4h = vld1q_u16(a4); a4 = (const uint16_t*) ((uintptr_t) a4 + k); const uint16x8_t vb0h = vld1q_u16(w); w += 8; const uint16x8_t vb1h = vld1q_u16(w); w += 8; const uint16x8_t vb2h = vld1q_u16(w); w += 8; const uint16x8_t vb3h = vld1q_u16(w); w += 8; const uint16x8_t vm0h = vceqq_u16(vb0h, vmovq_n_u16(0)); const uint16x8_t vm1h = vceqq_u16(vb1h, vmovq_n_u16(0)); const uint16x8_t vm2h = vceqq_u16(vb2h, vmovq_n_u16(0)); const uint16x8_t vm3h = vceqq_u16(vb3h, vmovq_n_u16(0)); const uint16x8x2_t vb0f = vzipq_u16(vzero, vb0h); const uint16x8x2_t vb1f = vzipq_u16(vzero, vb1h); const uint16x8x2_t vb2f = vzipq_u16(vzero, vb2h); const uint16x8x2_t vb3f = vzipq_u16(vzero, vb3h); const uint16x8_t va0x0h = vbicq_u16(va0h, vm0h); const uint16x8_t va1x0h = vbicq_u16(va1h, vm0h); const uint16x8_t va2x0h = vbicq_u16(va2h, vm0h); const uint16x8_t va3x0h = vbicq_u16(va3h, vm0h); const uint16x8_t va4x0h = vbicq_u16(va4h, vm0h); const uint16x8_t va0x1h = vbicq_u16(va0h, vm1h); const uint16x8_t va1x1h = vbicq_u16(va1h, vm1h); const uint16x8_t va2x1h = vbicq_u16(va2h, vm1h); const uint16x8_t va3x1h = vbicq_u16(va3h, vm1h); const uint16x8_t va4x1h = vbicq_u16(va4h, vm1h); const uint16x8_t va0x2h = vbicq_u16(va0h, vm2h); const uint16x8_t va1x2h = vbicq_u16(va1h, vm2h); const uint16x8_t va2x2h = vbicq_u16(va2h, vm2h); const uint16x8_t va3x2h = vbicq_u16(va3h, vm2h); const uint16x8_t va4x2h = vbicq_u16(va4h, vm2h); const uint16x8_t va0x3h = vbicq_u16(va0h, vm3h); const uint16x8_t va1x3h = vbicq_u16(va1h, vm3h); const uint16x8_t va2x3h = vbicq_u16(va2h, vm3h); const uint16x8_t va3x3h = vbicq_u16(va3h, vm3h); const uint16x8_t va4x3h = vbicq_u16(va4h, vm3h); const uint16x8x2_t va0x0f = vzipq_u16(vzero, va0x0h); const uint16x8x2_t va1x0f = vzipq_u16(vzero, va1x0h); const uint16x8x2_t va2x0f = vzipq_u16(vzero, va2x0h); const uint16x8x2_t va3x0f = vzipq_u16(vzero, va3x0h); const uint16x8x2_t va4x0f = vzipq_u16(vzero, va4x0h); const uint16x8x2_t va0x1f = vzipq_u16(vzero, va0x1h); const uint16x8x2_t va1x1f = vzipq_u16(vzero, va1x1h); const uint16x8x2_t va2x1f = vzipq_u16(vzero, va2x1h); const uint16x8x2_t va3x1f = vzipq_u16(vzero, va3x1h); const uint16x8x2_t va4x1f = vzipq_u16(vzero, va4x1h); const uint16x8x2_t va0x2f = vzipq_u16(vzero, va0x2h); const uint16x8x2_t va1x2f = vzipq_u16(vzero, va1x2h); const uint16x8x2_t va2x2f = vzipq_u16(vzero, va2x2h); const uint16x8x2_t va3x2f = vzipq_u16(vzero, va3x2h); const uint16x8x2_t va4x2f = vzipq_u16(vzero, va4x2h); const uint16x8x2_t va0x3f = vzipq_u16(vzero, va0x3h); const uint16x8x2_t va1x3f = vzipq_u16(vzero, va1x3h); const uint16x8x2_t va2x3f = vzipq_u16(vzero, 
va2x3h); const uint16x8x2_t va3x3f = vzipq_u16(vzero, va3x3h); const uint16x8x2_t va4x3f = vzipq_u16(vzero, va4x3h); vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0])); vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0])); vacc2x0 = vfmaq_f32(vacc2x0, vreinterpretq_f32_u16(va2x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0])); vacc3x0 = vfmaq_f32(vacc3x0, vreinterpretq_f32_u16(va3x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0])); vacc4x0 = vfmaq_f32(vacc4x0, vreinterpretq_f32_u16(va4x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0])); vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0])); vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0])); vacc2x1 = vfmaq_f32(vacc2x1, vreinterpretq_f32_u16(va2x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0])); vacc3x1 = vfmaq_f32(vacc3x1, vreinterpretq_f32_u16(va3x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0])); vacc4x1 = vfmaq_f32(vacc4x1, vreinterpretq_f32_u16(va4x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0])); vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0])); vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0])); vacc2x2 = vfmaq_f32(vacc2x2, vreinterpretq_f32_u16(va2x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0])); vacc3x2 = vfmaq_f32(vacc3x2, vreinterpretq_f32_u16(va3x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0])); vacc4x2 = vfmaq_f32(vacc4x2, vreinterpretq_f32_u16(va4x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0])); vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0])); vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0])); vacc2x3 = vfmaq_f32(vacc2x3, vreinterpretq_f32_u16(va2x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0])); vacc3x3 = vfmaq_f32(vacc3x3, vreinterpretq_f32_u16(va3x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0])); vacc4x3 = vfmaq_f32(vacc4x3, vreinterpretq_f32_u16(va4x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0])); vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1])); vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1])); vacc2x0 = vfmaq_f32(vacc2x0, vreinterpretq_f32_u16(va2x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1])); vacc3x0 = vfmaq_f32(vacc3x0, vreinterpretq_f32_u16(va3x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1])); vacc4x0 = vfmaq_f32(vacc4x0, vreinterpretq_f32_u16(va4x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1])); vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1])); vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1])); vacc2x1 = vfmaq_f32(vacc2x1, vreinterpretq_f32_u16(va2x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1])); vacc3x1 = vfmaq_f32(vacc3x1, vreinterpretq_f32_u16(va3x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1])); vacc4x1 = vfmaq_f32(vacc4x1, vreinterpretq_f32_u16(va4x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1])); vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1])); vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1])); vacc2x2 = vfmaq_f32(vacc2x2, vreinterpretq_f32_u16(va2x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1])); 
vacc3x2 = vfmaq_f32(vacc3x2, vreinterpretq_f32_u16(va3x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1])); vacc4x2 = vfmaq_f32(vacc4x2, vreinterpretq_f32_u16(va4x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1])); vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1])); vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1])); vacc2x3 = vfmaq_f32(vacc2x3, vreinterpretq_f32_u16(va2x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1])); vacc3x3 = vfmaq_f32(vacc3x3, vreinterpretq_f32_u16(va3x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1])); vacc4x3 = vfmaq_f32(vacc4x3, vreinterpretq_f32_u16(va4x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1])); } #if XNN_ARCH_ARM64 const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1); const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1); const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1); const float32x4_t vacc3x01 = vpaddq_f32(vacc3x0, vacc3x1); const float32x4_t vacc4x01 = vpaddq_f32(vacc4x0, vacc4x1); const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3); const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3); const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3); const float32x4_t vacc3x23 = vpaddq_f32(vacc3x2, vacc3x3); const float32x4_t vacc4x23 = vpaddq_f32(vacc4x2, vacc4x3); float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23); float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23); float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23); float32x4_t vacc3x0123 = vpaddq_f32(vacc3x01, vacc3x23); float32x4_t vacc4x0123 = vpaddq_f32(vacc4x01, vacc4x23); #else const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0)); const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0)); const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0)); const float32x2_t vsum3x0 = vadd_f32(vget_low_f32(vacc3x0), vget_high_f32(vacc3x0)); const float32x2_t vsum4x0 = vadd_f32(vget_low_f32(vacc4x0), vget_high_f32(vacc4x0)); const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1)); const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1)); const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1)); const float32x2_t vsum3x1 = vadd_f32(vget_low_f32(vacc3x1), vget_high_f32(vacc3x1)); const float32x2_t vsum4x1 = vadd_f32(vget_low_f32(vacc4x1), vget_high_f32(vacc4x1)); const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2)); const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2)); const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2)); const float32x2_t vsum3x2 = vadd_f32(vget_low_f32(vacc3x2), vget_high_f32(vacc3x2)); const float32x2_t vsum4x2 = vadd_f32(vget_low_f32(vacc4x2), vget_high_f32(vacc4x2)); const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3)); const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3)); const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3)); const float32x2_t vsum3x3 = vadd_f32(vget_low_f32(vacc3x3), vget_high_f32(vacc3x3)); const float32x2_t vsum4x3 = vadd_f32(vget_low_f32(vacc4x3), vget_high_f32(vacc4x3)); float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3)); float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3)); float32x4_t vacc2x0123 = 
vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3)); float32x4_t vacc3x0123 = vcombine_f32(vpadd_f32(vsum3x0, vsum3x1), vpadd_f32(vsum3x2, vsum3x3)); float32x4_t vacc4x0123 = vcombine_f32(vpadd_f32(vsum4x0, vsum4x1), vpadd_f32(vsum4x2, vsum4x3)); #endif const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max); vacc0x0123 = vminq_f32(vacc0x0123, vmax); vacc1x0123 = vminq_f32(vacc1x0123, vmax); vacc2x0123 = vminq_f32(vacc2x0123, vmax); vacc3x0123 = vminq_f32(vacc3x0123, vmax); vacc4x0123 = vminq_f32(vacc4x0123, vmax); const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min); vacc0x0123 = vmaxq_f32(vacc0x0123, vmin); vacc1x0123 = vmaxq_f32(vacc1x0123, vmin); vacc2x0123 = vmaxq_f32(vacc2x0123, vmin); vacc3x0123 = vmaxq_f32(vacc3x0123, vmin); vacc4x0123 = vmaxq_f32(vacc4x0123, vmin); uint16x4_t vout0x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc0x0123), 16); uint16x4_t vout1x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc1x0123), 16); uint16x4_t vout2x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc2x0123), 16); uint16x4_t vout3x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc3x0123), 16); uint16x4_t vout4x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc4x0123), 16); if XNN_LIKELY(nc >= 4) { vst1_u16(c0, vout0x0123); c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride); vst1_u16(c1, vout1x0123); c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride); vst1_u16(c2, vout2x0123); c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride); vst1_u16(c3, vout3x0123); c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride); vst1_u16(c4, vout4x0123); c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride); a0 = (const uint16_t*) ((uintptr_t) a0 - kc); a1 = (const uint16_t*) ((uintptr_t) a1 - kc); a2 = (const uint16_t*) ((uintptr_t) a2 - kc); a3 = (const uint16_t*) ((uintptr_t) a3 - kc); a4 = (const uint16_t*) ((uintptr_t) a4 - kc); nc -= 4; } else { if (nc & 2) { vst1_lane_u32((void*) c0, vreinterpret_u32_u16(vout0x0123), 0); c0 += 2; vst1_lane_u32((void*) c1, vreinterpret_u32_u16(vout1x0123), 0); c1 += 2; vst1_lane_u32((void*) c2, vreinterpret_u32_u16(vout2x0123), 0); c2 += 2; vst1_lane_u32((void*) c3, vreinterpret_u32_u16(vout3x0123), 0); c3 += 2; vst1_lane_u32((void*) c4, vreinterpret_u32_u16(vout4x0123), 0); c4 += 2; vout0x0123 = vext_u16(vout0x0123, vout0x0123, 2); vout1x0123 = vext_u16(vout1x0123, vout1x0123, 2); vout2x0123 = vext_u16(vout2x0123, vout2x0123, 2); vout3x0123 = vext_u16(vout3x0123, vout3x0123, 2); vout4x0123 = vext_u16(vout4x0123, vout4x0123, 2); } if (nc & 1) { vst1_lane_u16(c0, vout0x0123, 0); vst1_lane_u16(c1, vout1x0123, 0); vst1_lane_u16(c2, vout2x0123, 0); vst1_lane_u16(c3, vout3x0123, 0); vst1_lane_u16(c4, vout4x0123, 0); } nc = 0; } } while (nc != 0); }
22079
58.037433
109
c
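The zip kernel above widens bfloat16 to float32 without any shift instructions: vzipq_u16(vzero, vh) interleaves a zero below each 16-bit element, so in little-endian lane order every 32-bit lane ends up with a bfloat16 bit pattern in its high half, which is a valid float32. The two halves of the zip result cover elements 0..3 and 4..7, and the kernel feeds them straight into vfmaq_f32 after a reinterpret. A minimal sketch of just that widening step, assuming an AArch64 target with <arm_neon.h>; the helper name is illustrative:

#include <arm_neon.h>

// Widen 8 bfloat16 values (carried as uint16x8_t bit patterns) into two
// float32x4_t vectors by zipping a zero below each element, as the kernel
// above does once per loaded vector.
static void bf16x8_widen(uint16x8_t vh, float32x4_t* vlo, float32x4_t* vhi) {
  const uint16x8_t vzero = vmovq_n_u16(0);
  const uint16x8x2_t vf = vzipq_u16(vzero, vh);  // {0,h0,0,h1,...}, {0,h4,...}
  *vlo = vreinterpretq_f32_u16(vf.val[0]);       // elements 0..3 as float32
  *vhi = vreinterpretq_f32_u16(vf.val[1]);       // elements 4..7 as float32
}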
XNNPACK
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-5x8c2-minmax-neonbf16-bfdot-lane-ld128.c
// Auto-generated file. Do not edit! // Template: src/bf16-gemm/c2-neonbf16-bfdot-lane-ld128.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> void xnn_bf16_gemm_minmax_ukernel_5x8c2__neonbf16_bfdot_lane_ld128( size_t mr, size_t nc, size_t kc, const void* restrict a, size_t a_stride, const void* restrict w_ptr, void* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 5); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(bfloat16_t) == 0); assert(a != NULL); assert(w_ptr != NULL); assert(c != NULL); const bfloat16_t* a0 = (const bfloat16_t*) a; bfloat16_t* c0 = (bfloat16_t*) c; const bfloat16_t* a1 = (const bfloat16_t*) ((uintptr_t) a0 + a_stride); bfloat16_t* c1 = (bfloat16_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const bfloat16_t* a2 = (const bfloat16_t*) ((uintptr_t) a1 + a_stride); bfloat16_t* c2 = (bfloat16_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const bfloat16_t* a3 = (const bfloat16_t*) ((uintptr_t) a2 + a_stride); bfloat16_t* c3 = (bfloat16_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const bfloat16_t* a4 = (const bfloat16_t*) ((uintptr_t) a3 + a_stride); bfloat16_t* c4 = (bfloat16_t*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const bfloat16_t* w = (const bfloat16_t*) w_ptr; do { float32x4_t vacc0x0123 = vcvt_f32_bf16(vld1_bf16(w)); w += 4; float32x4_t vacc0x4567 = vcvt_f32_bf16(vld1_bf16(w)); w += 4; float32x4_t vacc1x0123 = vacc0x0123; float32x4_t vacc1x4567 = vacc0x4567; float32x4_t vacc2x0123 = vacc0x0123; float32x4_t vacc2x4567 = vacc0x4567; float32x4_t vacc3x0123 = vacc0x0123; float32x4_t vacc3x4567 = vacc0x4567; float32x4_t vacc4x0123 = vacc0x0123; float32x4_t vacc4x4567 = vacc0x4567; size_t k = kc; for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) { const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8; const bfloat16x8_t va1 = vld1q_bf16(a1); a1 += 8; const bfloat16x8_t va2 = vld1q_bf16(a2); a2 += 8; const bfloat16x8_t va3 = vld1q_bf16(a3); a3 += 8; const bfloat16x8_t va4 = vld1q_bf16(a4); a4 += 8; const bfloat16x8_t vb0123c01 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c01 = vld1q_bf16(w); w += 8; vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c01, va0, 0); vacc1x0123 = vbfdotq_laneq_f32(vacc1x0123, vb0123c01, va1, 0); vacc2x0123 = vbfdotq_laneq_f32(vacc2x0123, vb0123c01, va2, 0); vacc3x0123 = vbfdotq_laneq_f32(vacc3x0123, vb0123c01, va3, 0); vacc4x0123 = vbfdotq_laneq_f32(vacc4x0123, vb0123c01, va4, 0); vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c01, va0, 0); vacc1x4567 = vbfdotq_laneq_f32(vacc1x4567, vb4567c01, va1, 0); vacc2x4567 = vbfdotq_laneq_f32(vacc2x4567, vb4567c01, va2, 0); vacc3x4567 = vbfdotq_laneq_f32(vacc3x4567, vb4567c01, va3, 0); vacc4x4567 = vbfdotq_laneq_f32(vacc4x4567, vb4567c01, va4, 0); const bfloat16x8_t vb0123c23 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c23 = vld1q_bf16(w); w += 8; vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c23, va0, 1); vacc1x0123 = vbfdotq_laneq_f32(vacc1x0123, vb0123c23, va1, 1); vacc2x0123 = vbfdotq_laneq_f32(vacc2x0123, vb0123c23, va2, 1); vacc3x0123 = vbfdotq_laneq_f32(vacc3x0123, vb0123c23, va3, 1); 
vacc4x0123 = vbfdotq_laneq_f32(vacc4x0123, vb0123c23, va4, 1); vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c23, va0, 1); vacc1x4567 = vbfdotq_laneq_f32(vacc1x4567, vb4567c23, va1, 1); vacc2x4567 = vbfdotq_laneq_f32(vacc2x4567, vb4567c23, va2, 1); vacc3x4567 = vbfdotq_laneq_f32(vacc3x4567, vb4567c23, va3, 1); vacc4x4567 = vbfdotq_laneq_f32(vacc4x4567, vb4567c23, va4, 1); const bfloat16x8_t vb0123c45 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c45 = vld1q_bf16(w); w += 8; vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c45, va0, 2); vacc1x0123 = vbfdotq_laneq_f32(vacc1x0123, vb0123c45, va1, 2); vacc2x0123 = vbfdotq_laneq_f32(vacc2x0123, vb0123c45, va2, 2); vacc3x0123 = vbfdotq_laneq_f32(vacc3x0123, vb0123c45, va3, 2); vacc4x0123 = vbfdotq_laneq_f32(vacc4x0123, vb0123c45, va4, 2); vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c45, va0, 2); vacc1x4567 = vbfdotq_laneq_f32(vacc1x4567, vb4567c45, va1, 2); vacc2x4567 = vbfdotq_laneq_f32(vacc2x4567, vb4567c45, va2, 2); vacc3x4567 = vbfdotq_laneq_f32(vacc3x4567, vb4567c45, va3, 2); vacc4x4567 = vbfdotq_laneq_f32(vacc4x4567, vb4567c45, va4, 2); const bfloat16x8_t vb0123c67 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c67 = vld1q_bf16(w); w += 8; vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c67, va0, 3); vacc1x0123 = vbfdotq_laneq_f32(vacc1x0123, vb0123c67, va1, 3); vacc2x0123 = vbfdotq_laneq_f32(vacc2x0123, vb0123c67, va2, 3); vacc3x0123 = vbfdotq_laneq_f32(vacc3x0123, vb0123c67, va3, 3); vacc4x0123 = vbfdotq_laneq_f32(vacc4x0123, vb0123c67, va4, 3); vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c67, va0, 3); vacc1x4567 = vbfdotq_laneq_f32(vacc1x4567, vb4567c67, va1, 3); vacc2x4567 = vbfdotq_laneq_f32(vacc2x4567, vb4567c67, va2, 3); vacc3x4567 = vbfdotq_laneq_f32(vacc3x4567, vb4567c67, va3, 3); vacc4x4567 = vbfdotq_laneq_f32(vacc4x4567, vb4567c67, va4, 3); } if XNN_UNLIKELY(k != 0) { const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k); const bfloat16x8_t va1 = vld1q_bf16(a1); a1 = (const bfloat16_t*) ((uintptr_t) a1 + k); const bfloat16x8_t va2 = vld1q_bf16(a2); a2 = (const bfloat16_t*) ((uintptr_t) a2 + k); const bfloat16x8_t va3 = vld1q_bf16(a3); a3 = (const bfloat16_t*) ((uintptr_t) a3 + k); const bfloat16x8_t va4 = vld1q_bf16(a4); a4 = (const bfloat16_t*) ((uintptr_t) a4 + k); const bfloat16x8_t vb0123c01 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c01 = vld1q_bf16(w); w += 8; const uint32x4_t va0c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va0)), 0); const uint32x4_t va1c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va1)), 0); const uint32x4_t va2c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va2)), 0); const uint32x4_t va3c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va3)), 0); const uint32x4_t va4c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va4)), 0); const uint32x4_t vm0123c01 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c01), vmovq_n_u16(0))); const uint32x4_t vm4567c01 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c01), vmovq_n_u16(0))); const uint32x4_t va0x0123c01 = vbicq_u32(va0c01, vm0123c01); vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c01, vreinterpretq_bf16_u32(va0x0123c01)); const uint32x4_t va1x0123c01 = vbicq_u32(va1c01, vm0123c01); vacc1x0123 = vbfdotq_f32(vacc1x0123, vb0123c01, vreinterpretq_bf16_u32(va1x0123c01)); const uint32x4_t va2x0123c01 = vbicq_u32(va2c01, vm0123c01); vacc2x0123 = vbfdotq_f32(vacc2x0123, vb0123c01, vreinterpretq_bf16_u32(va2x0123c01)); const 
uint32x4_t va3x0123c01 = vbicq_u32(va3c01, vm0123c01); vacc3x0123 = vbfdotq_f32(vacc3x0123, vb0123c01, vreinterpretq_bf16_u32(va3x0123c01)); const uint32x4_t va4x0123c01 = vbicq_u32(va4c01, vm0123c01); vacc4x0123 = vbfdotq_f32(vacc4x0123, vb0123c01, vreinterpretq_bf16_u32(va4x0123c01)); const uint32x4_t va0x4567c01 = vbicq_u32(va0c01, vm4567c01); vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c01, vreinterpretq_bf16_u32(va0x4567c01)); const uint32x4_t va1x4567c01 = vbicq_u32(va1c01, vm4567c01); vacc1x4567 = vbfdotq_f32(vacc1x4567, vb4567c01, vreinterpretq_bf16_u32(va1x4567c01)); const uint32x4_t va2x4567c01 = vbicq_u32(va2c01, vm4567c01); vacc2x4567 = vbfdotq_f32(vacc2x4567, vb4567c01, vreinterpretq_bf16_u32(va2x4567c01)); const uint32x4_t va3x4567c01 = vbicq_u32(va3c01, vm4567c01); vacc3x4567 = vbfdotq_f32(vacc3x4567, vb4567c01, vreinterpretq_bf16_u32(va3x4567c01)); const uint32x4_t va4x4567c01 = vbicq_u32(va4c01, vm4567c01); vacc4x4567 = vbfdotq_f32(vacc4x4567, vb4567c01, vreinterpretq_bf16_u32(va4x4567c01)); if (k > 2 * sizeof(bfloat16_t)) { const bfloat16x8_t vb0123c23 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c23 = vld1q_bf16(w); w += 8; const uint32x4_t va0c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va0)), 1); const uint32x4_t va1c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va1)), 1); const uint32x4_t va2c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va2)), 1); const uint32x4_t va3c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va3)), 1); const uint32x4_t va4c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va4)), 1); const uint32x4_t vm0123c23 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c23), vmovq_n_u16(0))); const uint32x4_t vm4567c23 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c23), vmovq_n_u16(0))); const uint32x4_t va0x0123c23 = vbicq_u32(va0c23, vm0123c23); vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c23, vreinterpretq_bf16_u32(va0x0123c23)); const uint32x4_t va1x0123c23 = vbicq_u32(va1c23, vm0123c23); vacc1x0123 = vbfdotq_f32(vacc1x0123, vb0123c23, vreinterpretq_bf16_u32(va1x0123c23)); const uint32x4_t va2x0123c23 = vbicq_u32(va2c23, vm0123c23); vacc2x0123 = vbfdotq_f32(vacc2x0123, vb0123c23, vreinterpretq_bf16_u32(va2x0123c23)); const uint32x4_t va3x0123c23 = vbicq_u32(va3c23, vm0123c23); vacc3x0123 = vbfdotq_f32(vacc3x0123, vb0123c23, vreinterpretq_bf16_u32(va3x0123c23)); const uint32x4_t va4x0123c23 = vbicq_u32(va4c23, vm0123c23); vacc4x0123 = vbfdotq_f32(vacc4x0123, vb0123c23, vreinterpretq_bf16_u32(va4x0123c23)); const uint32x4_t va0x4567c23 = vbicq_u32(va0c23, vm4567c23); vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c23, vreinterpretq_bf16_u32(va0x4567c23)); const uint32x4_t va1x4567c23 = vbicq_u32(va1c23, vm4567c23); vacc1x4567 = vbfdotq_f32(vacc1x4567, vb4567c23, vreinterpretq_bf16_u32(va1x4567c23)); const uint32x4_t va2x4567c23 = vbicq_u32(va2c23, vm4567c23); vacc2x4567 = vbfdotq_f32(vacc2x4567, vb4567c23, vreinterpretq_bf16_u32(va2x4567c23)); const uint32x4_t va3x4567c23 = vbicq_u32(va3c23, vm4567c23); vacc3x4567 = vbfdotq_f32(vacc3x4567, vb4567c23, vreinterpretq_bf16_u32(va3x4567c23)); const uint32x4_t va4x4567c23 = vbicq_u32(va4c23, vm4567c23); vacc4x4567 = vbfdotq_f32(vacc4x4567, vb4567c23, vreinterpretq_bf16_u32(va4x4567c23)); if (k > 4 * sizeof(bfloat16_t)) { const bfloat16x8_t vb0123c45 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c45 = vld1q_bf16(w); w += 8; const uint32x4_t va0c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va0)), 0); const uint32x4_t 
va1c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va1)), 0); const uint32x4_t va2c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va2)), 0); const uint32x4_t va3c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va3)), 0); const uint32x4_t va4c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va4)), 0); const uint32x4_t vm0123c45 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c45), vmovq_n_u16(0))); const uint32x4_t vm4567c45 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c45), vmovq_n_u16(0))); const uint32x4_t va0x0123c45 = vbicq_u32(va0c45, vm0123c45); vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c45, vreinterpretq_bf16_u32(va0x0123c45)); const uint32x4_t va1x0123c45 = vbicq_u32(va1c45, vm0123c45); vacc1x0123 = vbfdotq_f32(vacc1x0123, vb0123c45, vreinterpretq_bf16_u32(va1x0123c45)); const uint32x4_t va2x0123c45 = vbicq_u32(va2c45, vm0123c45); vacc2x0123 = vbfdotq_f32(vacc2x0123, vb0123c45, vreinterpretq_bf16_u32(va2x0123c45)); const uint32x4_t va3x0123c45 = vbicq_u32(va3c45, vm0123c45); vacc3x0123 = vbfdotq_f32(vacc3x0123, vb0123c45, vreinterpretq_bf16_u32(va3x0123c45)); const uint32x4_t va4x0123c45 = vbicq_u32(va4c45, vm0123c45); vacc4x0123 = vbfdotq_f32(vacc4x0123, vb0123c45, vreinterpretq_bf16_u32(va4x0123c45)); const uint32x4_t va0x4567c45 = vbicq_u32(va0c45, vm4567c45); vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c45, vreinterpretq_bf16_u32(va0x4567c45)); const uint32x4_t va1x4567c45 = vbicq_u32(va1c45, vm4567c45); vacc1x4567 = vbfdotq_f32(vacc1x4567, vb4567c45, vreinterpretq_bf16_u32(va1x4567c45)); const uint32x4_t va2x4567c45 = vbicq_u32(va2c45, vm4567c45); vacc2x4567 = vbfdotq_f32(vacc2x4567, vb4567c45, vreinterpretq_bf16_u32(va2x4567c45)); const uint32x4_t va3x4567c45 = vbicq_u32(va3c45, vm4567c45); vacc3x4567 = vbfdotq_f32(vacc3x4567, vb4567c45, vreinterpretq_bf16_u32(va3x4567c45)); const uint32x4_t va4x4567c45 = vbicq_u32(va4c45, vm4567c45); vacc4x4567 = vbfdotq_f32(vacc4x4567, vb4567c45, vreinterpretq_bf16_u32(va4x4567c45)); if (k > 6 * sizeof(bfloat16_t)) { const bfloat16x8_t vb0123c67 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c67 = vld1q_bf16(w); w += 8; const uint32x4_t va0c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va0)), 1); const uint32x4_t va1c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va1)), 1); const uint32x4_t va2c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va2)), 1); const uint32x4_t va3c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va3)), 1); const uint32x4_t va4c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va4)), 1); const uint32x4_t vm0123c67 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c67), vmovq_n_u16(0))); const uint32x4_t vm4567c67 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c67), vmovq_n_u16(0))); const uint32x4_t va0x0123c67 = vbicq_u32(va0c67, vm0123c67); vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c67, vreinterpretq_bf16_u32(va0x0123c67)); const uint32x4_t va1x0123c67 = vbicq_u32(va1c67, vm0123c67); vacc1x0123 = vbfdotq_f32(vacc1x0123, vb0123c67, vreinterpretq_bf16_u32(va1x0123c67)); const uint32x4_t va2x0123c67 = vbicq_u32(va2c67, vm0123c67); vacc2x0123 = vbfdotq_f32(vacc2x0123, vb0123c67, vreinterpretq_bf16_u32(va2x0123c67)); const uint32x4_t va3x0123c67 = vbicq_u32(va3c67, vm0123c67); vacc3x0123 = vbfdotq_f32(vacc3x0123, vb0123c67, vreinterpretq_bf16_u32(va3x0123c67)); const uint32x4_t va4x0123c67 = vbicq_u32(va4c67, vm0123c67); vacc4x0123 = vbfdotq_f32(vacc4x0123, vb0123c67, 
vreinterpretq_bf16_u32(va4x0123c67)); const uint32x4_t va0x4567c67 = vbicq_u32(va0c67, vm4567c67); vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c67, vreinterpretq_bf16_u32(va0x4567c67)); const uint32x4_t va1x4567c67 = vbicq_u32(va1c67, vm4567c67); vacc1x4567 = vbfdotq_f32(vacc1x4567, vb4567c67, vreinterpretq_bf16_u32(va1x4567c67)); const uint32x4_t va2x4567c67 = vbicq_u32(va2c67, vm4567c67); vacc2x4567 = vbfdotq_f32(vacc2x4567, vb4567c67, vreinterpretq_bf16_u32(va2x4567c67)); const uint32x4_t va3x4567c67 = vbicq_u32(va3c67, vm4567c67); vacc3x4567 = vbfdotq_f32(vacc3x4567, vb4567c67, vreinterpretq_bf16_u32(va3x4567c67)); const uint32x4_t va4x4567c67 = vbicq_u32(va4c67, vm4567c67); vacc4x4567 = vbfdotq_f32(vacc4x4567, vb4567c67, vreinterpretq_bf16_u32(va4x4567c67)); } } } } const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max); vacc0x0123 = vminq_f32(vacc0x0123, vmax); vacc1x0123 = vminq_f32(vacc1x0123, vmax); vacc2x0123 = vminq_f32(vacc2x0123, vmax); vacc3x0123 = vminq_f32(vacc3x0123, vmax); vacc4x0123 = vminq_f32(vacc4x0123, vmax); vacc0x4567 = vminq_f32(vacc0x4567, vmax); vacc1x4567 = vminq_f32(vacc1x4567, vmax); vacc2x4567 = vminq_f32(vacc2x4567, vmax); vacc3x4567 = vminq_f32(vacc3x4567, vmax); vacc4x4567 = vminq_f32(vacc4x4567, vmax); const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min); vacc0x0123 = vmaxq_f32(vacc0x0123, vmin); vacc1x0123 = vmaxq_f32(vacc1x0123, vmin); vacc2x0123 = vmaxq_f32(vacc2x0123, vmin); vacc3x0123 = vmaxq_f32(vacc3x0123, vmin); vacc4x0123 = vmaxq_f32(vacc4x0123, vmin); vacc0x4567 = vmaxq_f32(vacc0x4567, vmin); vacc1x4567 = vmaxq_f32(vacc1x4567, vmin); vacc2x4567 = vmaxq_f32(vacc2x4567, vmin); vacc3x4567 = vmaxq_f32(vacc3x4567, vmin); vacc4x4567 = vmaxq_f32(vacc4x4567, vmin); bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123); bfloat16x4_t vout1x0123 = vcvt_bf16_f32(vacc1x0123); bfloat16x4_t vout2x0123 = vcvt_bf16_f32(vacc2x0123); bfloat16x4_t vout3x0123 = vcvt_bf16_f32(vacc3x0123); bfloat16x4_t vout4x0123 = vcvt_bf16_f32(vacc4x0123); bfloat16x4_t vout0x4567 = vcvt_bf16_f32(vacc0x4567); bfloat16x4_t vout1x4567 = vcvt_bf16_f32(vacc1x4567); bfloat16x4_t vout2x4567 = vcvt_bf16_f32(vacc2x4567); bfloat16x4_t vout3x4567 = vcvt_bf16_f32(vacc3x4567); bfloat16x4_t vout4x4567 = vcvt_bf16_f32(vacc4x4567); if XNN_LIKELY(nc >= 8) { vst1_bf16(c0, vout0x0123); vst1_bf16(c0 + 4, vout0x4567); c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride); vst1_bf16(c1, vout1x0123); vst1_bf16(c1 + 4, vout1x4567); c1 = (bfloat16_t*) ((uintptr_t) c1 + cn_stride); vst1_bf16(c2, vout2x0123); vst1_bf16(c2 + 4, vout2x4567); c2 = (bfloat16_t*) ((uintptr_t) c2 + cn_stride); vst1_bf16(c3, vout3x0123); vst1_bf16(c3 + 4, vout3x4567); c3 = (bfloat16_t*) ((uintptr_t) c3 + cn_stride); vst1_bf16(c4, vout4x0123); vst1_bf16(c4 + 4, vout4x4567); c4 = (bfloat16_t*) ((uintptr_t) c4 + cn_stride); a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc); a1 = (const bfloat16_t*) ((uintptr_t) a1 - kc); a2 = (const bfloat16_t*) ((uintptr_t) a2 - kc); a3 = (const bfloat16_t*) ((uintptr_t) a3 - kc); a4 = (const bfloat16_t*) ((uintptr_t) a4 - kc); nc -= 8; } else { if (nc & 4) { vst1_bf16(c0, vout0x0123); c0 += 4; vst1_bf16(c1, vout1x0123); c1 += 4; vst1_bf16(c2, vout2x0123); c2 += 4; vst1_bf16(c3, vout3x0123); c3 += 4; vst1_bf16(c4, vout4x0123); c4 += 4; vout0x0123 = vout0x4567; vout1x0123 = vout1x4567; vout2x0123 = vout2x4567; vout3x0123 = vout3x4567; vout4x0123 = vout4x4567; } if (nc & 2) { vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2; vst1_lane_u32((void*) c1, 
vreinterpret_u32_bf16(vout1x0123), 0); c1 += 2; vst1_lane_u32((void*) c2, vreinterpret_u32_bf16(vout2x0123), 0); c2 += 2; vst1_lane_u32((void*) c3, vreinterpret_u32_bf16(vout3x0123), 0); c3 += 2; vst1_lane_u32((void*) c4, vreinterpret_u32_bf16(vout4x0123), 0); c4 += 2; vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2)); vout1x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout1x0123), vreinterpret_u16_bf16(vout1x0123), 2)); vout2x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout2x0123), vreinterpret_u16_bf16(vout2x0123), 2)); vout3x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout3x0123), vreinterpret_u16_bf16(vout3x0123), 2)); vout4x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout4x0123), vreinterpret_u16_bf16(vout4x0123), 2)); } if (nc & 1) { vst1_lane_bf16(c0, vout0x0123, 0); vst1_lane_bf16(c1, vout1x0123, 0); vst1_lane_bf16(c2, vout2x0123, 0); vst1_lane_bf16(c3, vout3x0123, 0); vst1_lane_bf16(c4, vout4x0123, 0); } nc = 0; } } while (nc != 0); }
20988
53.658854
126
c
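In the bfdot kernel above, the main loop uses vbfdotq_laneq_f32 to broadcast successive 2-element pairs of each A row against 128-bit panels of packed W. The remainder path (k not a multiple of 8) is the subtle part: A is still loaded a full vector at a time, so its tail lanes can hold arbitrary bits, and since the packed W tail is zero-padded, a stray NaN or Inf in A times 0 in W would still poison the accumulator. The kernel therefore builds a mask of zero weights (vceqq_u16 against 0) and clears the matching A bits with vbicq_u32, turning every padded product into an exact 0*0. BIC is bitwise, so the uint32x4_t typing is just plumbing; each 16-bit element is cleared independently. A sketch of that masking step, assuming a target compiled with the Arm bf16 extension; the function name is illustrative:

#include <arm_neon.h>

// Clear every 16-bit element of a (broadcast) bfloat16 activation vector
// whose packed weight is zero, so the zero-padded tail of k contributes
// an exact 0*0 to BFDOT instead of garbage*0.
static bfloat16x8_t mask_padded_lanes(uint32x4_t va_dup, bfloat16x8_t vb) {
  const uint32x4_t vm = vreinterpretq_u32_u16(
      vceqq_u16(vreinterpretq_u16_bf16(vb), vmovq_n_u16(0)));  // all-ones where w == 0
  return vreinterpretq_bf16_u32(vbicq_u32(va_dup, vm));        // va & ~vm (bitwise)
}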
XNNPACK
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-6x8c2-minmax-neonbf16-bfdot-lane-ld128.c
// Auto-generated file. Do not edit! // Template: src/bf16-gemm/c2-neonbf16-bfdot-lane-ld128.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> void xnn_bf16_gemm_minmax_ukernel_6x8c2__neonbf16_bfdot_lane_ld128( size_t mr, size_t nc, size_t kc, const void* restrict a, size_t a_stride, const void* restrict w_ptr, void* restrict c, size_t cm_stride, size_t cn_stride, const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 6); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(bfloat16_t) == 0); assert(a != NULL); assert(w_ptr != NULL); assert(c != NULL); const bfloat16_t* a0 = (const bfloat16_t*) a; bfloat16_t* c0 = (bfloat16_t*) c; const bfloat16_t* a1 = (const bfloat16_t*) ((uintptr_t) a0 + a_stride); bfloat16_t* c1 = (bfloat16_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { a1 = a0; c1 = c0; } const bfloat16_t* a2 = (const bfloat16_t*) ((uintptr_t) a1 + a_stride); bfloat16_t* c2 = (bfloat16_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { a2 = a1; c2 = c1; } const bfloat16_t* a3 = (const bfloat16_t*) ((uintptr_t) a2 + a_stride); bfloat16_t* c3 = (bfloat16_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } const bfloat16_t* a4 = (const bfloat16_t*) ((uintptr_t) a3 + a_stride); bfloat16_t* c4 = (bfloat16_t*) ((uintptr_t) c3 + cm_stride); if XNN_UNPREDICTABLE(mr <= 4) { a4 = a3; c4 = c3; } const bfloat16_t* a5 = (const bfloat16_t*) ((uintptr_t) a4 + a_stride); bfloat16_t* c5 = (bfloat16_t*) ((uintptr_t) c4 + cm_stride); if XNN_UNPREDICTABLE(mr != 6) { a5 = a4; c5 = c4; } const bfloat16_t* w = (const bfloat16_t*) w_ptr; do { float32x4_t vacc0x0123 = vcvt_f32_bf16(vld1_bf16(w)); w += 4; float32x4_t vacc0x4567 = vcvt_f32_bf16(vld1_bf16(w)); w += 4; float32x4_t vacc1x0123 = vacc0x0123; float32x4_t vacc1x4567 = vacc0x4567; float32x4_t vacc2x0123 = vacc0x0123; float32x4_t vacc2x4567 = vacc0x4567; float32x4_t vacc3x0123 = vacc0x0123; float32x4_t vacc3x4567 = vacc0x4567; float32x4_t vacc4x0123 = vacc0x0123; float32x4_t vacc4x4567 = vacc0x4567; float32x4_t vacc5x0123 = vacc0x0123; float32x4_t vacc5x4567 = vacc0x4567; size_t k = kc; for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) { const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8; const bfloat16x8_t va1 = vld1q_bf16(a1); a1 += 8; const bfloat16x8_t va2 = vld1q_bf16(a2); a2 += 8; const bfloat16x8_t va3 = vld1q_bf16(a3); a3 += 8; const bfloat16x8_t va4 = vld1q_bf16(a4); a4 += 8; const bfloat16x8_t va5 = vld1q_bf16(a5); a5 += 8; const bfloat16x8_t vb0123c01 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c01 = vld1q_bf16(w); w += 8; vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c01, va0, 0); vacc1x0123 = vbfdotq_laneq_f32(vacc1x0123, vb0123c01, va1, 0); vacc2x0123 = vbfdotq_laneq_f32(vacc2x0123, vb0123c01, va2, 0); vacc3x0123 = vbfdotq_laneq_f32(vacc3x0123, vb0123c01, va3, 0); vacc4x0123 = vbfdotq_laneq_f32(vacc4x0123, vb0123c01, va4, 0); vacc5x0123 = vbfdotq_laneq_f32(vacc5x0123, vb0123c01, va5, 0); vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c01, va0, 0); vacc1x4567 = vbfdotq_laneq_f32(vacc1x4567, vb4567c01, va1, 0); vacc2x4567 = vbfdotq_laneq_f32(vacc2x4567, vb4567c01, va2, 0); vacc3x4567 = vbfdotq_laneq_f32(vacc3x4567, vb4567c01, va3, 0); vacc4x4567 = vbfdotq_laneq_f32(vacc4x4567, vb4567c01, va4, 
0); vacc5x4567 = vbfdotq_laneq_f32(vacc5x4567, vb4567c01, va5, 0); const bfloat16x8_t vb0123c23 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c23 = vld1q_bf16(w); w += 8; vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c23, va0, 1); vacc1x0123 = vbfdotq_laneq_f32(vacc1x0123, vb0123c23, va1, 1); vacc2x0123 = vbfdotq_laneq_f32(vacc2x0123, vb0123c23, va2, 1); vacc3x0123 = vbfdotq_laneq_f32(vacc3x0123, vb0123c23, va3, 1); vacc4x0123 = vbfdotq_laneq_f32(vacc4x0123, vb0123c23, va4, 1); vacc5x0123 = vbfdotq_laneq_f32(vacc5x0123, vb0123c23, va5, 1); vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c23, va0, 1); vacc1x4567 = vbfdotq_laneq_f32(vacc1x4567, vb4567c23, va1, 1); vacc2x4567 = vbfdotq_laneq_f32(vacc2x4567, vb4567c23, va2, 1); vacc3x4567 = vbfdotq_laneq_f32(vacc3x4567, vb4567c23, va3, 1); vacc4x4567 = vbfdotq_laneq_f32(vacc4x4567, vb4567c23, va4, 1); vacc5x4567 = vbfdotq_laneq_f32(vacc5x4567, vb4567c23, va5, 1); const bfloat16x8_t vb0123c45 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c45 = vld1q_bf16(w); w += 8; vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c45, va0, 2); vacc1x0123 = vbfdotq_laneq_f32(vacc1x0123, vb0123c45, va1, 2); vacc2x0123 = vbfdotq_laneq_f32(vacc2x0123, vb0123c45, va2, 2); vacc3x0123 = vbfdotq_laneq_f32(vacc3x0123, vb0123c45, va3, 2); vacc4x0123 = vbfdotq_laneq_f32(vacc4x0123, vb0123c45, va4, 2); vacc5x0123 = vbfdotq_laneq_f32(vacc5x0123, vb0123c45, va5, 2); vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c45, va0, 2); vacc1x4567 = vbfdotq_laneq_f32(vacc1x4567, vb4567c45, va1, 2); vacc2x4567 = vbfdotq_laneq_f32(vacc2x4567, vb4567c45, va2, 2); vacc3x4567 = vbfdotq_laneq_f32(vacc3x4567, vb4567c45, va3, 2); vacc4x4567 = vbfdotq_laneq_f32(vacc4x4567, vb4567c45, va4, 2); vacc5x4567 = vbfdotq_laneq_f32(vacc5x4567, vb4567c45, va5, 2); const bfloat16x8_t vb0123c67 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c67 = vld1q_bf16(w); w += 8; vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c67, va0, 3); vacc1x0123 = vbfdotq_laneq_f32(vacc1x0123, vb0123c67, va1, 3); vacc2x0123 = vbfdotq_laneq_f32(vacc2x0123, vb0123c67, va2, 3); vacc3x0123 = vbfdotq_laneq_f32(vacc3x0123, vb0123c67, va3, 3); vacc4x0123 = vbfdotq_laneq_f32(vacc4x0123, vb0123c67, va4, 3); vacc5x0123 = vbfdotq_laneq_f32(vacc5x0123, vb0123c67, va5, 3); vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c67, va0, 3); vacc1x4567 = vbfdotq_laneq_f32(vacc1x4567, vb4567c67, va1, 3); vacc2x4567 = vbfdotq_laneq_f32(vacc2x4567, vb4567c67, va2, 3); vacc3x4567 = vbfdotq_laneq_f32(vacc3x4567, vb4567c67, va3, 3); vacc4x4567 = vbfdotq_laneq_f32(vacc4x4567, vb4567c67, va4, 3); vacc5x4567 = vbfdotq_laneq_f32(vacc5x4567, vb4567c67, va5, 3); } if XNN_UNLIKELY(k != 0) { const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k); const bfloat16x8_t va1 = vld1q_bf16(a1); a1 = (const bfloat16_t*) ((uintptr_t) a1 + k); const bfloat16x8_t va2 = vld1q_bf16(a2); a2 = (const bfloat16_t*) ((uintptr_t) a2 + k); const bfloat16x8_t va3 = vld1q_bf16(a3); a3 = (const bfloat16_t*) ((uintptr_t) a3 + k); const bfloat16x8_t va4 = vld1q_bf16(a4); a4 = (const bfloat16_t*) ((uintptr_t) a4 + k); const bfloat16x8_t va5 = vld1q_bf16(a5); a5 = (const bfloat16_t*) ((uintptr_t) a5 + k); const bfloat16x8_t vb0123c01 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c01 = vld1q_bf16(w); w += 8; const uint32x4_t va0c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va0)), 0); const uint32x4_t va1c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va1)), 0); const uint32x4_t va2c01 = 
vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va2)), 0); const uint32x4_t va3c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va3)), 0); const uint32x4_t va4c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va4)), 0); const uint32x4_t va5c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va5)), 0); const uint32x4_t vm0123c01 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c01), vmovq_n_u16(0))); const uint32x4_t vm4567c01 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c01), vmovq_n_u16(0))); const uint32x4_t va0x0123c01 = vbicq_u32(va0c01, vm0123c01); vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c01, vreinterpretq_bf16_u32(va0x0123c01)); const uint32x4_t va1x0123c01 = vbicq_u32(va1c01, vm0123c01); vacc1x0123 = vbfdotq_f32(vacc1x0123, vb0123c01, vreinterpretq_bf16_u32(va1x0123c01)); const uint32x4_t va2x0123c01 = vbicq_u32(va2c01, vm0123c01); vacc2x0123 = vbfdotq_f32(vacc2x0123, vb0123c01, vreinterpretq_bf16_u32(va2x0123c01)); const uint32x4_t va3x0123c01 = vbicq_u32(va3c01, vm0123c01); vacc3x0123 = vbfdotq_f32(vacc3x0123, vb0123c01, vreinterpretq_bf16_u32(va3x0123c01)); const uint32x4_t va4x0123c01 = vbicq_u32(va4c01, vm0123c01); vacc4x0123 = vbfdotq_f32(vacc4x0123, vb0123c01, vreinterpretq_bf16_u32(va4x0123c01)); const uint32x4_t va5x0123c01 = vbicq_u32(va5c01, vm0123c01); vacc5x0123 = vbfdotq_f32(vacc5x0123, vb0123c01, vreinterpretq_bf16_u32(va5x0123c01)); const uint32x4_t va0x4567c01 = vbicq_u32(va0c01, vm4567c01); vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c01, vreinterpretq_bf16_u32(va0x4567c01)); const uint32x4_t va1x4567c01 = vbicq_u32(va1c01, vm4567c01); vacc1x4567 = vbfdotq_f32(vacc1x4567, vb4567c01, vreinterpretq_bf16_u32(va1x4567c01)); const uint32x4_t va2x4567c01 = vbicq_u32(va2c01, vm4567c01); vacc2x4567 = vbfdotq_f32(vacc2x4567, vb4567c01, vreinterpretq_bf16_u32(va2x4567c01)); const uint32x4_t va3x4567c01 = vbicq_u32(va3c01, vm4567c01); vacc3x4567 = vbfdotq_f32(vacc3x4567, vb4567c01, vreinterpretq_bf16_u32(va3x4567c01)); const uint32x4_t va4x4567c01 = vbicq_u32(va4c01, vm4567c01); vacc4x4567 = vbfdotq_f32(vacc4x4567, vb4567c01, vreinterpretq_bf16_u32(va4x4567c01)); const uint32x4_t va5x4567c01 = vbicq_u32(va5c01, vm4567c01); vacc5x4567 = vbfdotq_f32(vacc5x4567, vb4567c01, vreinterpretq_bf16_u32(va5x4567c01)); if (k > 2 * sizeof(bfloat16_t)) { const bfloat16x8_t vb0123c23 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c23 = vld1q_bf16(w); w += 8; const uint32x4_t va0c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va0)), 1); const uint32x4_t va1c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va1)), 1); const uint32x4_t va2c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va2)), 1); const uint32x4_t va3c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va3)), 1); const uint32x4_t va4c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va4)), 1); const uint32x4_t va5c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va5)), 1); const uint32x4_t vm0123c23 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c23), vmovq_n_u16(0))); const uint32x4_t vm4567c23 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c23), vmovq_n_u16(0))); const uint32x4_t va0x0123c23 = vbicq_u32(va0c23, vm0123c23); vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c23, vreinterpretq_bf16_u32(va0x0123c23)); const uint32x4_t va1x0123c23 = vbicq_u32(va1c23, vm0123c23); vacc1x0123 = vbfdotq_f32(vacc1x0123, vb0123c23, vreinterpretq_bf16_u32(va1x0123c23)); const uint32x4_t va2x0123c23 = 
vbicq_u32(va2c23, vm0123c23); vacc2x0123 = vbfdotq_f32(vacc2x0123, vb0123c23, vreinterpretq_bf16_u32(va2x0123c23)); const uint32x4_t va3x0123c23 = vbicq_u32(va3c23, vm0123c23); vacc3x0123 = vbfdotq_f32(vacc3x0123, vb0123c23, vreinterpretq_bf16_u32(va3x0123c23)); const uint32x4_t va4x0123c23 = vbicq_u32(va4c23, vm0123c23); vacc4x0123 = vbfdotq_f32(vacc4x0123, vb0123c23, vreinterpretq_bf16_u32(va4x0123c23)); const uint32x4_t va5x0123c23 = vbicq_u32(va5c23, vm0123c23); vacc5x0123 = vbfdotq_f32(vacc5x0123, vb0123c23, vreinterpretq_bf16_u32(va5x0123c23)); const uint32x4_t va0x4567c23 = vbicq_u32(va0c23, vm4567c23); vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c23, vreinterpretq_bf16_u32(va0x4567c23)); const uint32x4_t va1x4567c23 = vbicq_u32(va1c23, vm4567c23); vacc1x4567 = vbfdotq_f32(vacc1x4567, vb4567c23, vreinterpretq_bf16_u32(va1x4567c23)); const uint32x4_t va2x4567c23 = vbicq_u32(va2c23, vm4567c23); vacc2x4567 = vbfdotq_f32(vacc2x4567, vb4567c23, vreinterpretq_bf16_u32(va2x4567c23)); const uint32x4_t va3x4567c23 = vbicq_u32(va3c23, vm4567c23); vacc3x4567 = vbfdotq_f32(vacc3x4567, vb4567c23, vreinterpretq_bf16_u32(va3x4567c23)); const uint32x4_t va4x4567c23 = vbicq_u32(va4c23, vm4567c23); vacc4x4567 = vbfdotq_f32(vacc4x4567, vb4567c23, vreinterpretq_bf16_u32(va4x4567c23)); const uint32x4_t va5x4567c23 = vbicq_u32(va5c23, vm4567c23); vacc5x4567 = vbfdotq_f32(vacc5x4567, vb4567c23, vreinterpretq_bf16_u32(va5x4567c23)); if (k > 4 * sizeof(bfloat16_t)) { const bfloat16x8_t vb0123c45 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c45 = vld1q_bf16(w); w += 8; const uint32x4_t va0c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va0)), 0); const uint32x4_t va1c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va1)), 0); const uint32x4_t va2c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va2)), 0); const uint32x4_t va3c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va3)), 0); const uint32x4_t va4c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va4)), 0); const uint32x4_t va5c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va5)), 0); const uint32x4_t vm0123c45 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c45), vmovq_n_u16(0))); const uint32x4_t vm4567c45 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c45), vmovq_n_u16(0))); const uint32x4_t va0x0123c45 = vbicq_u32(va0c45, vm0123c45); vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c45, vreinterpretq_bf16_u32(va0x0123c45)); const uint32x4_t va1x0123c45 = vbicq_u32(va1c45, vm0123c45); vacc1x0123 = vbfdotq_f32(vacc1x0123, vb0123c45, vreinterpretq_bf16_u32(va1x0123c45)); const uint32x4_t va2x0123c45 = vbicq_u32(va2c45, vm0123c45); vacc2x0123 = vbfdotq_f32(vacc2x0123, vb0123c45, vreinterpretq_bf16_u32(va2x0123c45)); const uint32x4_t va3x0123c45 = vbicq_u32(va3c45, vm0123c45); vacc3x0123 = vbfdotq_f32(vacc3x0123, vb0123c45, vreinterpretq_bf16_u32(va3x0123c45)); const uint32x4_t va4x0123c45 = vbicq_u32(va4c45, vm0123c45); vacc4x0123 = vbfdotq_f32(vacc4x0123, vb0123c45, vreinterpretq_bf16_u32(va4x0123c45)); const uint32x4_t va5x0123c45 = vbicq_u32(va5c45, vm0123c45); vacc5x0123 = vbfdotq_f32(vacc5x0123, vb0123c45, vreinterpretq_bf16_u32(va5x0123c45)); const uint32x4_t va0x4567c45 = vbicq_u32(va0c45, vm4567c45); vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c45, vreinterpretq_bf16_u32(va0x4567c45)); const uint32x4_t va1x4567c45 = vbicq_u32(va1c45, vm4567c45); vacc1x4567 = vbfdotq_f32(vacc1x4567, vb4567c45, vreinterpretq_bf16_u32(va1x4567c45)); const uint32x4_t va2x4567c45 = 
vbicq_u32(va2c45, vm4567c45); vacc2x4567 = vbfdotq_f32(vacc2x4567, vb4567c45, vreinterpretq_bf16_u32(va2x4567c45)); const uint32x4_t va3x4567c45 = vbicq_u32(va3c45, vm4567c45); vacc3x4567 = vbfdotq_f32(vacc3x4567, vb4567c45, vreinterpretq_bf16_u32(va3x4567c45)); const uint32x4_t va4x4567c45 = vbicq_u32(va4c45, vm4567c45); vacc4x4567 = vbfdotq_f32(vacc4x4567, vb4567c45, vreinterpretq_bf16_u32(va4x4567c45)); const uint32x4_t va5x4567c45 = vbicq_u32(va5c45, vm4567c45); vacc5x4567 = vbfdotq_f32(vacc5x4567, vb4567c45, vreinterpretq_bf16_u32(va5x4567c45)); if (k > 6 * sizeof(bfloat16_t)) { const bfloat16x8_t vb0123c67 = vld1q_bf16(w); w += 8; const bfloat16x8_t vb4567c67 = vld1q_bf16(w); w += 8; const uint32x4_t va0c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va0)), 1); const uint32x4_t va1c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va1)), 1); const uint32x4_t va2c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va2)), 1); const uint32x4_t va3c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va3)), 1); const uint32x4_t va4c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va4)), 1); const uint32x4_t va5c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va5)), 1); const uint32x4_t vm0123c67 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c67), vmovq_n_u16(0))); const uint32x4_t vm4567c67 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c67), vmovq_n_u16(0))); const uint32x4_t va0x0123c67 = vbicq_u32(va0c67, vm0123c67); vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c67, vreinterpretq_bf16_u32(va0x0123c67)); const uint32x4_t va1x0123c67 = vbicq_u32(va1c67, vm0123c67); vacc1x0123 = vbfdotq_f32(vacc1x0123, vb0123c67, vreinterpretq_bf16_u32(va1x0123c67)); const uint32x4_t va2x0123c67 = vbicq_u32(va2c67, vm0123c67); vacc2x0123 = vbfdotq_f32(vacc2x0123, vb0123c67, vreinterpretq_bf16_u32(va2x0123c67)); const uint32x4_t va3x0123c67 = vbicq_u32(va3c67, vm0123c67); vacc3x0123 = vbfdotq_f32(vacc3x0123, vb0123c67, vreinterpretq_bf16_u32(va3x0123c67)); const uint32x4_t va4x0123c67 = vbicq_u32(va4c67, vm0123c67); vacc4x0123 = vbfdotq_f32(vacc4x0123, vb0123c67, vreinterpretq_bf16_u32(va4x0123c67)); const uint32x4_t va5x0123c67 = vbicq_u32(va5c67, vm0123c67); vacc5x0123 = vbfdotq_f32(vacc5x0123, vb0123c67, vreinterpretq_bf16_u32(va5x0123c67)); const uint32x4_t va0x4567c67 = vbicq_u32(va0c67, vm4567c67); vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c67, vreinterpretq_bf16_u32(va0x4567c67)); const uint32x4_t va1x4567c67 = vbicq_u32(va1c67, vm4567c67); vacc1x4567 = vbfdotq_f32(vacc1x4567, vb4567c67, vreinterpretq_bf16_u32(va1x4567c67)); const uint32x4_t va2x4567c67 = vbicq_u32(va2c67, vm4567c67); vacc2x4567 = vbfdotq_f32(vacc2x4567, vb4567c67, vreinterpretq_bf16_u32(va2x4567c67)); const uint32x4_t va3x4567c67 = vbicq_u32(va3c67, vm4567c67); vacc3x4567 = vbfdotq_f32(vacc3x4567, vb4567c67, vreinterpretq_bf16_u32(va3x4567c67)); const uint32x4_t va4x4567c67 = vbicq_u32(va4c67, vm4567c67); vacc4x4567 = vbfdotq_f32(vacc4x4567, vb4567c67, vreinterpretq_bf16_u32(va4x4567c67)); const uint32x4_t va5x4567c67 = vbicq_u32(va5c67, vm4567c67); vacc5x4567 = vbfdotq_f32(vacc5x4567, vb4567c67, vreinterpretq_bf16_u32(va5x4567c67)); } } } } const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max); vacc0x0123 = vminq_f32(vacc0x0123, vmax); vacc1x0123 = vminq_f32(vacc1x0123, vmax); vacc2x0123 = vminq_f32(vacc2x0123, vmax); vacc3x0123 = vminq_f32(vacc3x0123, vmax); vacc4x0123 = vminq_f32(vacc4x0123, vmax); vacc5x0123 = vminq_f32(vacc5x0123, vmax); 
vacc0x4567 = vminq_f32(vacc0x4567, vmax); vacc1x4567 = vminq_f32(vacc1x4567, vmax); vacc2x4567 = vminq_f32(vacc2x4567, vmax); vacc3x4567 = vminq_f32(vacc3x4567, vmax); vacc4x4567 = vminq_f32(vacc4x4567, vmax); vacc5x4567 = vminq_f32(vacc5x4567, vmax); const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min); vacc0x0123 = vmaxq_f32(vacc0x0123, vmin); vacc1x0123 = vmaxq_f32(vacc1x0123, vmin); vacc2x0123 = vmaxq_f32(vacc2x0123, vmin); vacc3x0123 = vmaxq_f32(vacc3x0123, vmin); vacc4x0123 = vmaxq_f32(vacc4x0123, vmin); vacc5x0123 = vmaxq_f32(vacc5x0123, vmin); vacc0x4567 = vmaxq_f32(vacc0x4567, vmin); vacc1x4567 = vmaxq_f32(vacc1x4567, vmin); vacc2x4567 = vmaxq_f32(vacc2x4567, vmin); vacc3x4567 = vmaxq_f32(vacc3x4567, vmin); vacc4x4567 = vmaxq_f32(vacc4x4567, vmin); vacc5x4567 = vmaxq_f32(vacc5x4567, vmin); bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123); bfloat16x4_t vout1x0123 = vcvt_bf16_f32(vacc1x0123); bfloat16x4_t vout2x0123 = vcvt_bf16_f32(vacc2x0123); bfloat16x4_t vout3x0123 = vcvt_bf16_f32(vacc3x0123); bfloat16x4_t vout4x0123 = vcvt_bf16_f32(vacc4x0123); bfloat16x4_t vout5x0123 = vcvt_bf16_f32(vacc5x0123); bfloat16x4_t vout0x4567 = vcvt_bf16_f32(vacc0x4567); bfloat16x4_t vout1x4567 = vcvt_bf16_f32(vacc1x4567); bfloat16x4_t vout2x4567 = vcvt_bf16_f32(vacc2x4567); bfloat16x4_t vout3x4567 = vcvt_bf16_f32(vacc3x4567); bfloat16x4_t vout4x4567 = vcvt_bf16_f32(vacc4x4567); bfloat16x4_t vout5x4567 = vcvt_bf16_f32(vacc5x4567); if XNN_LIKELY(nc >= 8) { vst1_bf16(c0, vout0x0123); vst1_bf16(c0 + 4, vout0x4567); c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride); vst1_bf16(c1, vout1x0123); vst1_bf16(c1 + 4, vout1x4567); c1 = (bfloat16_t*) ((uintptr_t) c1 + cn_stride); vst1_bf16(c2, vout2x0123); vst1_bf16(c2 + 4, vout2x4567); c2 = (bfloat16_t*) ((uintptr_t) c2 + cn_stride); vst1_bf16(c3, vout3x0123); vst1_bf16(c3 + 4, vout3x4567); c3 = (bfloat16_t*) ((uintptr_t) c3 + cn_stride); vst1_bf16(c4, vout4x0123); vst1_bf16(c4 + 4, vout4x4567); c4 = (bfloat16_t*) ((uintptr_t) c4 + cn_stride); vst1_bf16(c5, vout5x0123); vst1_bf16(c5 + 4, vout5x4567); c5 = (bfloat16_t*) ((uintptr_t) c5 + cn_stride); a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc); a1 = (const bfloat16_t*) ((uintptr_t) a1 - kc); a2 = (const bfloat16_t*) ((uintptr_t) a2 - kc); a3 = (const bfloat16_t*) ((uintptr_t) a3 - kc); a4 = (const bfloat16_t*) ((uintptr_t) a4 - kc); a5 = (const bfloat16_t*) ((uintptr_t) a5 - kc); nc -= 8; } else { if (nc & 4) { vst1_bf16(c0, vout0x0123); c0 += 4; vst1_bf16(c1, vout1x0123); c1 += 4; vst1_bf16(c2, vout2x0123); c2 += 4; vst1_bf16(c3, vout3x0123); c3 += 4; vst1_bf16(c4, vout4x0123); c4 += 4; vst1_bf16(c5, vout5x0123); c5 += 4; vout0x0123 = vout0x4567; vout1x0123 = vout1x4567; vout2x0123 = vout2x4567; vout3x0123 = vout3x4567; vout4x0123 = vout4x4567; vout5x0123 = vout5x4567; } if (nc & 2) { vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2; vst1_lane_u32((void*) c1, vreinterpret_u32_bf16(vout1x0123), 0); c1 += 2; vst1_lane_u32((void*) c2, vreinterpret_u32_bf16(vout2x0123), 0); c2 += 2; vst1_lane_u32((void*) c3, vreinterpret_u32_bf16(vout3x0123), 0); c3 += 2; vst1_lane_u32((void*) c4, vreinterpret_u32_bf16(vout4x0123), 0); c4 += 2; vst1_lane_u32((void*) c5, vreinterpret_u32_bf16(vout5x0123), 0); c5 += 2; vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2)); vout1x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout1x0123), vreinterpret_u16_bf16(vout1x0123), 2)); vout2x0123 = 
vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout2x0123), vreinterpret_u16_bf16(vout2x0123), 2)); vout3x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout3x0123), vreinterpret_u16_bf16(vout3x0123), 2)); vout4x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout4x0123), vreinterpret_u16_bf16(vout4x0123), 2)); vout5x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout5x0123), vreinterpret_u16_bf16(vout5x0123), 2)); } if (nc & 1) { vst1_lane_bf16(c0, vout0x0123, 0); vst1_lane_bf16(c1, vout1x0123, 0); vst1_lane_bf16(c2, vout2x0123, 0); vst1_lane_bf16(c3, vout3x0123, 0); vst1_lane_bf16(c4, vout4x0123, 0); vst1_lane_bf16(c5, vout5x0123, 0); } nc = 0; } } while (nc != 0); }
24,489
55.04119
126
c
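The vceqq_u16/vbicq_u32 sequence in the bf16 GEMM remainder path above is NaN-safety masking for BFDOT: packed weights past kc are zero-padded, but in floating point 0 * NaN and 0 * Inf are NaN, so the activation lanes paired with zero weights must be cleared as well before the dot product. A minimal scalar sketch of the idea, using plain float as a stand-in for bf16 (names are illustrative, not from the kernel):

#include <math.h>
#include <stdio.h>

/* Where the padded weight is zero, force the activation to zero too, so
 * garbage activations (possibly Inf/NaN) cannot reach the accumulator
 * through 0 * NaN. This mirrors vbicq_u32(vaXcYZ, vmNNNNcYZ) above. */
static float masked_dot4(const float* a, const float* w) {
  float acc = 0.0f;
  for (int i = 0; i < 4; i++) {
    const float ai = (w[i] == 0.0f) ? 0.0f : a[i];
    acc += ai * w[i];
  }
  return acc;
}

int main(void) {
  const float w[4] = { 2.0f, 3.0f, 0.0f, 0.0f };     /* last two lanes: zero padding */
  const float a[4] = { 1.0f, 1.0f, NAN, INFINITY };  /* garbage past the real data   */
  printf("%f\n", masked_dot4(a, w));                 /* prints 5.000000, not nan     */
  return 0;
}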
XNNPACK
XNNPACK-master/src/cs16-bfly4/cs16-bfly4-neon-x1.c
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/fft.h> #include <arm_neon.h> void xnn_cs16_bfly4_ukernel__neon_x1( size_t batch, size_t samples, int16_t* data, const int16_t* twiddle, size_t stride) { assert(batch != 0); assert(samples != 0); assert(samples % (sizeof(int16_t) * 2) == 0); assert(data != NULL); assert(stride != 0); assert(twiddle != NULL); const int16x4_t vdiv4 = vdup_n_s16(8191); const int16x4_t vnegr = vreinterpret_s16_u32(vdup_n_u32(0x0001ffff)); int16_t* data3 = data; do { int16_t* data0 = data3; int16_t* data1 = (int16_t*) ((uintptr_t) data0 + samples); int16_t* data2 = (int16_t*) ((uintptr_t) data1 + samples); data3 = (int16_t*) ((uintptr_t) data2 + samples); // First sample skips twiddle. { int16x4_t vout0 = vreinterpret_s16_u32(vld1_dup_u32((void*) data0)); int16x4_t vout1 = vreinterpret_s16_u32(vld1_dup_u32((void*) data1)); int16x4_t vout2 = vreinterpret_s16_u32(vld1_dup_u32((void*) data2)); int16x4_t vout3 = vreinterpret_s16_u32(vld1_dup_u32((void*) data3)); vout1 = vqrdmulh_s16(vout1, vdiv4); vout3 = vqrdmulh_s16(vout3, vdiv4); vout0 = vqrdmulh_s16(vout0, vdiv4); vout2 = vqrdmulh_s16(vout2, vdiv4); const int16x4_t vtmp4 = vsub_s16(vout1, vout3); const int16x4_t vtmp3 = vadd_s16(vout1, vout3); int16x4_t vrev4 = vmul_s16(vtmp4, vnegr); // vrev4 = vtmp4 -r, i const int16x4_t vtmp5 = vsub_s16(vout0, vout2); vout0 = vadd_s16(vout0, vout2); vrev4 = vrev32_s16(vrev4); // vrev4 = vtmp4 i, -r vout2 = vsub_s16(vout0, vtmp3); vout0 = vadd_s16(vout0, vtmp3); vout1 = vadd_s16(vtmp5, vrev4); vout3 = vsub_s16(vtmp5, vrev4); vst1_lane_u32((void*) data0, vreinterpret_u32_s16(vout0), 0); data0 += 2; vst1_lane_u32((void*) data1, vreinterpret_u32_s16(vout1), 0); data1 += 2; vst1_lane_u32((void*) data2, vreinterpret_u32_s16(vout2), 0); data2 += 2; vst1_lane_u32((void*) data3, vreinterpret_u32_s16(vout3), 0); data3 += 2; } size_t s = samples - sizeof(int16_t) * 2; if XNN_LIKELY(s != 0) { const int16_t* tw1 = (const int16_t*) ((uintptr_t) twiddle + stride); const int16_t* tw2 = (const int16_t*) ((uintptr_t) twiddle + stride * 2); const int16_t* tw3 = (const int16_t*) ((uintptr_t) twiddle + stride * 3); do { int16x4_t vout0 = vreinterpret_s16_u32(vld1_dup_u32((void*) data0)); int16x4_t vout1 = vreinterpret_s16_u32(vld1_dup_u32((void*) data1)); int16x4_t vout2 = vreinterpret_s16_u32(vld1_dup_u32((void*) data2)); int16x4_t vout3 = vreinterpret_s16_u32(vld1_dup_u32((void*) data3)); const int16x4_t vtw1 = vreinterpret_s16_u32(vld1_dup_u32((const void*) tw1)); const int16x4_t vtw2 = vreinterpret_s16_u32(vld1_dup_u32((const void*) tw2)); const int16x4_t vtw3 = vreinterpret_s16_u32(vld1_dup_u32((const void*) tw3)); tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); // Note 32767 / 4 = 8191. Should be 8192. 
vout0 = vqrdmulh_s16(vout0, vdiv4); vout1 = vqrdmulh_s16(vout1, vdiv4); vout2 = vqrdmulh_s16(vout2, vdiv4); vout3 = vqrdmulh_s16(vout3, vdiv4); int16x4_t vnegtw1 = vmul_s16(vtw1, vnegr); // vrevtw1 = vtw1 -r, i int16x4_t vnegtw2 = vmul_s16(vtw2, vnegr); // vrevtw2 = vtw2 -r, i int16x4_t vnegtw3 = vmul_s16(vtw3, vnegr); // vrevtw3 = vtw3 -r, i int32x4_t vaccr1 = vmull_lane_s16(vtw1, vout1, 0); int32x4_t vaccr2 = vmull_lane_s16(vtw2, vout2, 0); int32x4_t vaccr3 = vmull_lane_s16(vtw3, vout3, 0); int16x4_t vrevtw1 = vrev32_s16(vnegtw1); // vrevtw1 = vtw1 i, -r int16x4_t vrevtw2 = vrev32_s16(vnegtw2); // vrevtw2 = vtw2 i, -r int16x4_t vrevtw3 = vrev32_s16(vnegtw3); // vrevtw3 = vtw3 i, -r vaccr1 = vmlsl_lane_s16(vaccr1, vrevtw1, vout1, 1); vaccr2 = vmlsl_lane_s16(vaccr2, vrevtw2, vout2, 1); vaccr3 = vmlsl_lane_s16(vaccr3, vrevtw3, vout3, 1); const int16x4_t vtmp0 = vrshrn_n_s32(vaccr1, 15); const int16x4_t vtmp1 = vrshrn_n_s32(vaccr2, 15); const int16x4_t vtmp2 = vrshrn_n_s32(vaccr3, 15); const int16x4_t vtmp4 = vsub_s16(vtmp0, vtmp2); const int16x4_t vtmp3 = vadd_s16(vtmp0, vtmp2); int16x4_t vrev4 = vmul_s16(vtmp4, vnegr); // vrev4 = vtmp4 -r, i const int16x4_t vtmp5 = vsub_s16(vout0, vtmp1); vout0 = vadd_s16(vout0, vtmp1); vrev4 = vrev32_s16(vrev4); // vrev4 = vtmp4 i, -r vout2 = vsub_s16(vout0, vtmp3); vout0 = vadd_s16(vout0, vtmp3); vout1 = vadd_s16(vtmp5, vrev4); vout3 = vsub_s16(vtmp5, vrev4); vst1_lane_u32((void*) data0, vreinterpret_u32_s16(vout0), 0); data0 += 2; vst1_lane_u32((void*) data1, vreinterpret_u32_s16(vout1), 0); data1 += 2; vst1_lane_u32((void*) data2, vreinterpret_u32_s16(vout2), 0); data2 += 2; vst1_lane_u32((void*) data3, vreinterpret_u32_s16(vout3), 0); data3 += 2; s -= sizeof(int16_t) * 2; } while (s != 0); } } while (--batch != 0); }
5,521
38.726619
85
c
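In the butterfly kernel above, vnegr holds the per-complex-pair pattern {-1, +1} (the u32 constant 0x0001ffff), so vmul_s16 negates the real parts and vrev32_s16 then swaps the 16-bit halves of each 32-bit complex pair: together (r, i) -> (i, -r), which is multiplication by -j. A scalar sketch of the identity (hypothetical helper, not from the source):

#include <stdint.h>

typedef struct { int16_t r, i; } c16;

/* -j * (r + j*i) = i - j*r: swap the parts and negate the new imaginary.
 * The kernel instead negates r first (vmul by {-1, +1}) and then swaps
 * (vrev32), which needs no shuffle mask and pipelines well on NEON. */
static c16 cmul_minus_j(c16 x) {
  const c16 y = { x.i, (int16_t) -x.r };
  return y;
}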
XNNPACK
XNNPACK-master/src/cs16-bfly4/cs16-bfly4-neon-x4.c
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/fft.h> #include <arm_neon.h> void xnn_cs16_bfly4_ukernel__neon_x4( size_t batch, size_t samples, int16_t* data, const int16_t* twiddle, size_t stride) { assert(batch != 0); assert(samples != 0); assert(samples % (sizeof(int16_t) * 8) == 0); assert(data != NULL); assert(stride != 0); assert(twiddle != NULL); const int16x4_t vdiv4 = vdup_n_s16(8191); int16_t* data3 = data; do { int16_t* data0 = data3; int16_t* data1 = (int16_t*) ((uintptr_t) data0 + samples); int16_t* data2 = (int16_t*) ((uintptr_t) data1 + samples); data3 = (int16_t*) ((uintptr_t) data2 + samples); const int16_t* tw1 = twiddle; const int16_t* tw2 = twiddle; const int16_t* tw3 = twiddle; size_t s = samples; for (; s >= sizeof(int16_t) * 8; s -= sizeof(int16_t) * 8) { int16x4x2_t vout0 = vld2_s16(data0); int16x4x2_t vout1 = vld2_s16(data1); int16x4x2_t vout2 = vld2_s16(data2); int16x4x2_t vout3 = vld2_s16(data3); int16x4x2_t vtw1 = vld2_dup_s16(tw1); int16x4x2_t vtw2 = vld2_dup_s16(tw2); int16x4x2_t vtw3 = vld2_dup_s16(tw3); tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); vtw1 = vld2_lane_s16(tw1, vtw1, 1); vtw2 = vld2_lane_s16(tw2, vtw2, 1); vtw3 = vld2_lane_s16(tw3, vtw3, 1); tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); vtw1 = vld2_lane_s16(tw1, vtw1, 2); vtw2 = vld2_lane_s16(tw2, vtw2, 2); vtw3 = vld2_lane_s16(tw3, vtw3, 2); tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); vtw1 = vld2_lane_s16(tw1, vtw1, 3); vtw2 = vld2_lane_s16(tw2, vtw2, 3); vtw3 = vld2_lane_s16(tw3, vtw3, 3); tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); // Note 32767 / 4 = 8191. Should be 8192. 
vout1.val[0] = vqrdmulh_s16(vout1.val[0], vdiv4); vout1.val[1] = vqrdmulh_s16(vout1.val[1], vdiv4); vout2.val[0] = vqrdmulh_s16(vout2.val[0], vdiv4); vout2.val[1] = vqrdmulh_s16(vout2.val[1], vdiv4); vout3.val[0] = vqrdmulh_s16(vout3.val[0], vdiv4); vout3.val[1] = vqrdmulh_s16(vout3.val[1], vdiv4); vout0.val[0] = vqrdmulh_s16(vout0.val[0], vdiv4); vout0.val[1] = vqrdmulh_s16(vout0.val[1], vdiv4); int32x4_t vacc0r = vmull_s16(vout1.val[0], vtw1.val[0]); int32x4_t vacc1r = vmull_s16(vout2.val[0], vtw2.val[0]); int32x4_t vacc2r = vmull_s16(vout3.val[0], vtw3.val[0]); int32x4_t vacc0i = vmull_s16(vout1.val[0], vtw1.val[1]); int32x4_t vacc1i = vmull_s16(vout2.val[0], vtw2.val[1]); int32x4_t vacc2i = vmull_s16(vout3.val[0], vtw3.val[1]); vacc0r = vmlsl_s16(vacc0r, vout1.val[1], vtw1.val[1]); vacc1r = vmlsl_s16(vacc1r, vout2.val[1], vtw2.val[1]); vacc2r = vmlsl_s16(vacc2r, vout3.val[1], vtw3.val[1]); vacc0i = vmlal_s16(vacc0i, vout1.val[1], vtw1.val[0]); vacc1i = vmlal_s16(vacc1i, vout2.val[1], vtw2.val[0]); vacc2i = vmlal_s16(vacc2i, vout3.val[1], vtw3.val[0]); int16x4_t vtmp0r = vrshrn_n_s32(vacc0r, 15); int16x4_t vtmp1r = vrshrn_n_s32(vacc1r, 15); int16x4_t vtmp2r = vrshrn_n_s32(vacc2r, 15); int16x4_t vtmp0i = vrshrn_n_s32(vacc0i, 15); int16x4_t vtmp1i = vrshrn_n_s32(vacc1i, 15); int16x4_t vtmp2i = vrshrn_n_s32(vacc2i, 15); const int16x4_t vtmp4r = vsub_s16(vtmp0r, vtmp2r); const int16x4_t vtmp4i = vsub_s16(vtmp0i, vtmp2i); const int16x4_t vtmp3r = vadd_s16(vtmp0r, vtmp2r); const int16x4_t vtmp3i = vadd_s16(vtmp0i, vtmp2i); const int16x4_t vtmp5r = vsub_s16(vout0.val[0], vtmp1r); const int16x4_t vtmp5i = vsub_s16(vout0.val[1], vtmp1i); vout0.val[0] = vadd_s16(vout0.val[0], vtmp1r); vout0.val[1] = vadd_s16(vout0.val[1], vtmp1i); vout2.val[0] = vsub_s16(vout0.val[0], vtmp3r); vout2.val[1] = vsub_s16(vout0.val[1], vtmp3i); vout0.val[0] = vadd_s16(vout0.val[0], vtmp3r); vout0.val[1] = vadd_s16(vout0.val[1], vtmp3i); vout1.val[0] = vadd_s16(vtmp5r, vtmp4i); vout1.val[1] = vsub_s16(vtmp5i, vtmp4r); vout3.val[0] = vsub_s16(vtmp5r, vtmp4i); vout3.val[1] = vadd_s16(vtmp5i, vtmp4r); vst2_s16(data0, vout0); data0 += 8; vst2_s16(data1, vout1); data1 += 8; vst2_s16(data2, vout2); data2 += 8; vst2_s16(data3, vout3); data3 += 8; } } while (--batch != 0); }
5,111
38.323077
72
c
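The x4 kernel gathers four twiddles per factor with one vld2_dup_s16 plus three vld2_lane_s16 loads because consecutive butterflies need W^k, W^(2k) and W^(3k): the three pointers advance by stride, 2*stride and 3*stride bytes, since stepping k by one table step steps 2k by two and 3k by three. Scalar equivalent of the strided gather (illustrative helper; stride is in bytes, as in the kernel):

#include <stddef.h>
#include <stdint.h>

/* Collect four complex twiddles spaced stride_bytes apart into split
 * real/imaginary arrays, matching the de-interleaved int16x4x2_t layout. */
static void gather_twiddles(const int16_t* tw, size_t stride_bytes,
                            int16_t r[4], int16_t i[4]) {
  for (int lane = 0; lane < 4; lane++) {
    r[lane] = tw[0];  /* real part */
    i[lane] = tw[1];  /* imag part */
    tw = (const int16_t*) ((uintptr_t) tw + stride_bytes);
  }
}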
XNNPACK
XNNPACK-master/src/cs16-bfly4/cs16-bfly4-samples1-neon.c
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/fft.h> #include <arm_neon.h> void xnn_cs16_bfly4_samples1_ukernel__neon( size_t batch, size_t samples, int16_t* data, const int16_t* twiddle, size_t stride) { assert(batch != 0); assert(samples == sizeof(int16_t) * 2); assert(data != NULL); assert(stride != 0); assert(twiddle != NULL); const int16x4_t vdiv4 = vdup_n_s16(8191); const int16x4_t vnegr = vreinterpret_s16_u32(vdup_n_u32(0x0001ffff)); uint32x2x4_t vout; do { const uint32x2x4_t vi = (vld4_dup_u32((void*)data)); int16x4_t vout1 = vqrdmulh_s16(vreinterpret_s16_u32(vi.val[1]), vdiv4); int16x4_t vout3 = vqrdmulh_s16(vreinterpret_s16_u32(vi.val[3]), vdiv4); int16x4_t vout0 = vqrdmulh_s16(vreinterpret_s16_u32(vi.val[0]), vdiv4); int16x4_t vout2 = vqrdmulh_s16(vreinterpret_s16_u32(vi.val[2]), vdiv4); const int16x4_t vtmp4 = vsub_s16(vout1, vout3); const int16x4_t vtmp3 = vadd_s16(vout1, vout3); int16x4_t vrev4 = vmul_s16(vtmp4, vnegr); // vrev4 = vtmp4 -r, i const int16x4_t vtmp5 = vsub_s16(vout0, vout2); vout0 = vadd_s16(vout0, vout2); vrev4 = vrev32_s16(vrev4); // vrev4 = vtmp4 i, -r vout.val[2] = vreinterpret_u32_s16(vsub_s16(vout0, vtmp3)); vout.val[0] = vreinterpret_u32_s16(vadd_s16(vout0, vtmp3)); vout.val[1] = vreinterpret_u32_s16(vadd_s16(vtmp5, vrev4)); vout.val[3] = vreinterpret_u32_s16(vsub_s16(vtmp5, vrev4)); vst4_lane_u32((void*)data, vout, 0); data += 8; } while(--batch != 0); }
1,759
29.344828
75
c
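Each radix-4 stage pre-scales its inputs by 1/4 via vqrdmulh_s16(x, 8191): VQRDMULH computes round((2*a*b) / 2^16) with saturation, so the constant 8191 scales by 8191/32768 ≈ 0.24997 rather than an exact quarter. The recurring "Should be 8192" comment records that 8192/32768 would be exactly 1/4. A scalar model of the instruction (a sketch; assumes the usual arithmetic right shift on signed integers, which the kernels obtain explicitly via math_asr_s32):

#include <stdint.h>

/* Scalar model of vqrdmulh_s16: doubling, rounding, high-half extraction,
 * saturated to the int16 range. The int64 intermediate avoids overflow at
 * a = b = INT16_MIN. */
static int16_t sqrdmulh16(int16_t a, int16_t b) {
  int32_t p = (int32_t) (((int64_t) a * b * 2 + (1 << 15)) >> 16);
  if (p > INT16_MAX) p = INT16_MAX;
  if (p < INT16_MIN) p = INT16_MIN;
  return (int16_t) p;
}

/* Example: sqrdmulh16(20000, 8191) == 4999, one LSB under the exact quarter
 * 5000; with the constant 8192 it would be exactly 5000. */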
XNNPACK
XNNPACK-master/src/cs16-bfly4/cs16-bfly4-samples1-scalar.c
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/fft.h> void xnn_cs16_bfly4_samples1_ukernel__scalar( size_t batch, size_t samples, int16_t* data, const int16_t* twiddle, size_t stride) { assert(samples == sizeof(int16_t) * 2); assert(data != NULL); assert(stride != 0); assert(twiddle != NULL); do { int32_t vout0r = (int32_t) data[0]; int32_t vout0i = (int32_t) data[1]; int32_t vout1r = (int32_t) data[2]; int32_t vout1i = (int32_t) data[3]; int32_t vout2r = (int32_t) data[4]; int32_t vout2i = (int32_t) data[5]; int32_t vout3r = (int32_t) data[6]; int32_t vout3i = (int32_t) data[7]; // Note 32767 / 4 = 8191. Should be 8192. vout0r = math_asr_s32(vout0r * 8191 + 16384, 15); vout0i = math_asr_s32(vout0i * 8191 + 16384, 15); vout1r = math_asr_s32(vout1r * 8191 + 16384, 15); vout1i = math_asr_s32(vout1i * 8191 + 16384, 15); vout2r = math_asr_s32(vout2r * 8191 + 16384, 15); vout2i = math_asr_s32(vout2i * 8191 + 16384, 15); vout3r = math_asr_s32(vout3r * 8191 + 16384, 15); vout3i = math_asr_s32(vout3i * 8191 + 16384, 15); const int32_t vtmp5r = vout0r - vout2r; const int32_t vtmp5i = vout0i - vout2i; vout0r += vout2r; vout0i += vout2i; const int32_t vtmp3r = vout1r + vout3r; const int32_t vtmp3i = vout1i + vout3i; const int32_t vtmp4r = vout1i - vout3i; const int32_t vtmp4i = -(vout1r - vout3r); // swap r,i and neg i vout2r = vout0r - vtmp3r; vout2i = vout0i - vtmp3i; vout0r += vtmp3r; vout0i += vtmp3i; vout1r = vtmp5r + vtmp4r; vout1i = vtmp5i + vtmp4i; vout3r = vtmp5r - vtmp4r; vout3i = vtmp5i - vtmp4i; data[0] = (int16_t) vout0r; data[1] = (int16_t) vout0i; data[2] = (int16_t) vout1r; data[3] = (int16_t) vout1i; data[4] = (int16_t) vout2r; data[5] = (int16_t) vout2i; data[6] = (int16_t) vout3r; data[7] = (int16_t) vout3i; data += 8; } while(--batch != 0); }
2,215
28.157895
72
c
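With the four pre-scaled complex inputs a = vout0, b = vout1, c = vout2, d = vout3, the scalar block above is the textbook forward radix-4 butterfly:

  X0 = (a + c) + (b + d)
  X1 = (a - c) - j*(b - d)
  X2 = (a + c) - (b + d)
  X3 = (a - c) + j*(b - d)

where vtmp5 = a - c, vtmp3 = b + d, and vtmp4 = -j*(b - d) is produced by the "swap r,i and neg i" step (vtmp4r = b.i - d.i, vtmp4i = -(b.r - d.r)).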
XNNPACK
XNNPACK-master/src/cs16-bfly4/cs16-bfly4-samples4-neon.c
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/fft.h> #include <arm_neon.h> static const int16_t xnn_table_fft256_samples4_twiddle[24] = { 32767,0, 30273,-12539, 23170,-23170, 12539,-30273, 32767,0, 23170,-23170, 0,-32767, -23170,-23170, 32767,0, 12539,-30273, -23170,-23170, -30273, 12539, }; void xnn_cs16_bfly4_samples4_ukernel__neon( size_t batch, size_t samples, int16_t* data, const int16_t* twiddle, size_t stride) { assert(batch != 0); assert(samples == sizeof(int16_t) * 8); assert(data != NULL); assert(stride == sizeof(int16_t) * 2 * 16); assert(twiddle != NULL); const int16x4_t vdiv4 = vdup_n_s16(8191); int16x4x2_t vtw1 = vld2_s16(xnn_table_fft256_samples4_twiddle); int16x4x2_t vtw2 = vld2_s16(xnn_table_fft256_samples4_twiddle + 8); int16x4x2_t vtw3 = vld2_s16(xnn_table_fft256_samples4_twiddle + 16); int16_t* data3 = data; do { int16_t* data0 = data3; int16_t* data1 = (int16_t*) ((uintptr_t) data0 + samples); int16_t* data2 = (int16_t*) ((uintptr_t) data1 + samples); data3 = (int16_t*) ((uintptr_t) data2 + samples); int16x4x2_t vout0 = vld2_s16(data0); int16x4x2_t vout1 = vld2_s16(data1); int16x4x2_t vout2 = vld2_s16(data2); int16x4x2_t vout3 = vld2_s16(data3); // Note 32767 / 4 = 8191. Should be 8192. vout0.val[0] = vqrdmulh_s16(vout0.val[0], vdiv4); vout0.val[1] = vqrdmulh_s16(vout0.val[1], vdiv4); vout1.val[0] = vqrdmulh_s16(vout1.val[0], vdiv4); vout1.val[1] = vqrdmulh_s16(vout1.val[1], vdiv4); vout2.val[0] = vqrdmulh_s16(vout2.val[0], vdiv4); vout2.val[1] = vqrdmulh_s16(vout2.val[1], vdiv4); vout3.val[0] = vqrdmulh_s16(vout3.val[0], vdiv4); vout3.val[1] = vqrdmulh_s16(vout3.val[1], vdiv4); int32x4_t vacc0r = vmull_s16(vout1.val[0], vtw1.val[0]); int32x4_t vacc1r = vmull_s16(vout2.val[0], vtw2.val[0]); int32x4_t vacc2r = vmull_s16(vout3.val[0], vtw3.val[0]); int32x4_t vacc0i = vmull_s16(vout1.val[0], vtw1.val[1]); int32x4_t vacc1i = vmull_s16(vout2.val[0], vtw2.val[1]); int32x4_t vacc2i = vmull_s16(vout3.val[0], vtw3.val[1]); vacc0r = vmlsl_s16(vacc0r, vout1.val[1], vtw1.val[1]); vacc1r = vmlsl_s16(vacc1r, vout2.val[1], vtw2.val[1]); vacc2r = vmlsl_s16(vacc2r, vout3.val[1], vtw3.val[1]); vacc0i = vmlal_s16(vacc0i, vout1.val[1], vtw1.val[0]); vacc1i = vmlal_s16(vacc1i, vout2.val[1], vtw2.val[0]); vacc2i = vmlal_s16(vacc2i, vout3.val[1], vtw3.val[0]); int16x4_t vtmp0r = vrshrn_n_s32(vacc0r, 15); int16x4_t vtmp1r = vrshrn_n_s32(vacc1r, 15); int16x4_t vtmp2r = vrshrn_n_s32(vacc2r, 15); int16x4_t vtmp0i = vrshrn_n_s32(vacc0i, 15); int16x4_t vtmp1i = vrshrn_n_s32(vacc1i, 15); int16x4_t vtmp2i = vrshrn_n_s32(vacc2i, 15); const int16x4_t vtmp4r = vsub_s16(vtmp0r, vtmp2r); const int16x4_t vtmp4i = vsub_s16(vtmp0i, vtmp2i); const int16x4_t vtmp3r = vadd_s16(vtmp0r, vtmp2r); const int16x4_t vtmp3i = vadd_s16(vtmp0i, vtmp2i); const int16x4_t vtmp5r = vsub_s16(vout0.val[0], vtmp1r); const int16x4_t vtmp5i = vsub_s16(vout0.val[1], vtmp1i); vout0.val[0] = vadd_s16(vout0.val[0], vtmp1r); vout0.val[1] = vadd_s16(vout0.val[1], vtmp1i); vout2.val[0] = vsub_s16(vout0.val[0], vtmp3r); vout2.val[1] = vsub_s16(vout0.val[1], vtmp3i); vout0.val[0] = vadd_s16(vout0.val[0], vtmp3r); vout0.val[1] = vadd_s16(vout0.val[1], vtmp3i); vout1.val[0] = vadd_s16(vtmp5r, vtmp4i); vout1.val[1] = vsub_s16(vtmp5i, vtmp4r); vout3.val[0] = vsub_s16(vtmp5r, vtmp4i); vout3.val[1] = vadd_s16(vtmp5i, vtmp4r); 
vst2_s16(data0, vout0); vst2_s16(data1, vout1); vst2_s16(data2, vout2); vst2_s16(data3, vout3); data3 += 8; } while (--batch != 0); }
3,987
36.271028
72
c
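The static table in the samples4 kernel above holds W_16^k, W_16^(2k) and W_16^(3k) for k = 0..3 in Q15, with W_N = e^(-j*2*pi/N). A sketch that regenerates the same values (assumes a POSIX <math.h> providing M_PI):

#include <math.h>
#include <stdio.h>

int main(void) {
  for (int m = 1; m <= 3; m++) {        /* twiddle factor power */
    for (int k = 0; k < 4; k++) {       /* butterfly index      */
      const double phi = -2.0 * M_PI * m * k / 16.0;
      printf("%ld,%ld, ", lround(32767.0 * cos(phi)),
                          lround(32767.0 * sin(phi)));
    }
    printf("\n");
  }
  return 0;  /* prints 32767,0, 30273,-12539, 23170,-23170, 12539,-30273, ... */
}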
XNNPACK
XNNPACK-master/src/cs16-bfly4/cs16-bfly4-samples4-scalar.c
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/fft.h> void xnn_cs16_bfly4_samples4_ukernel__scalar( size_t batch, size_t samples, int16_t* data, const int16_t* twiddle, size_t stride) { assert(batch != 0); assert(samples != 0); assert(samples % (sizeof(int16_t) * 2) == 0); assert(data != NULL); assert(stride != 0); assert(twiddle != NULL); const int16_t* tw1 = twiddle; const int16_t* tw2 = twiddle; const int16_t* tw3 = twiddle; tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); const int32_t vtw1r0 = (const int32_t) tw1[0]; const int32_t vtw1i0 = (const int32_t) tw1[1]; tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); const int32_t vtw1r1 = (const int32_t) tw1[0]; const int32_t vtw1i1 = (const int32_t) tw1[1]; tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); const int32_t vtw1r2 = (const int32_t) tw1[0]; const int32_t vtw1i2 = (const int32_t) tw1[1]; const int32_t vtw2r0 = (const int32_t) tw2[0]; const int32_t vtw2i0 = (const int32_t) tw2[1]; tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); const int32_t vtw2r1 = (const int32_t) tw2[0]; const int32_t vtw2i1 = (const int32_t) tw2[1]; tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); const int32_t vtw2r2 = (const int32_t) tw2[0]; const int32_t vtw2i2 = (const int32_t) tw2[1]; const int32_t vtw3r0 = (const int32_t) tw3[0]; const int32_t vtw3i0 = (const int32_t) tw3[1]; tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); const int32_t vtw3r1 = (const int32_t) tw3[0]; const int32_t vtw3i1 = (const int32_t) tw3[1]; tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); const int32_t vtw3r2 = (const int32_t) tw3[0]; const int32_t vtw3i2 = (const int32_t) tw3[1]; int16_t* data3 = data; do { int16_t* data0 = data3; int16_t* data1 = (int16_t*) ((uintptr_t) data0 + samples); int16_t* data2 = (int16_t*) ((uintptr_t) data1 + samples); data3 = (int16_t*) ((uintptr_t) data2 + samples); // First sample skips twiddle. // Same code as samples=1 but supports stride { int32_t vout0r = (int32_t) data0[0]; int32_t vout0i = (int32_t) data0[1]; int32_t vout1r = (int32_t) data1[0]; int32_t vout1i = (int32_t) data1[1]; int32_t vout2r = (int32_t) data2[0]; int32_t vout2i = (int32_t) data2[1]; int32_t vout3r = (int32_t) data3[0]; int32_t vout3i = (int32_t) data3[1]; // Note 32767 / 4 = 8191. Should be 8192. 
vout0r = math_asr_s32(vout0r * 8191 + 16384, 15); vout0i = math_asr_s32(vout0i * 8191 + 16384, 15); vout1r = math_asr_s32(vout1r * 8191 + 16384, 15); vout1i = math_asr_s32(vout1i * 8191 + 16384, 15); vout2r = math_asr_s32(vout2r * 8191 + 16384, 15); vout2i = math_asr_s32(vout2i * 8191 + 16384, 15); vout3r = math_asr_s32(vout3r * 8191 + 16384, 15); vout3i = math_asr_s32(vout3i * 8191 + 16384, 15); const int32_t vtmp5r = vout0r - vout2r; const int32_t vtmp5i = vout0i - vout2i; vout0r += vout2r; vout0i += vout2i; const int32_t vtmp3r = vout1r + vout3r; const int32_t vtmp3i = vout1i + vout3i; const int32_t vtmp4r = vout1i - vout3i; const int32_t vtmp4i = -(vout1r - vout3r); // swap r,i and neg i vout2r = vout0r - vtmp3r; vout2i = vout0i - vtmp3i; vout0r += vtmp3r; vout0i += vtmp3i; vout1r = vtmp5r + vtmp4r; vout1i = vtmp5i + vtmp4i; vout3r = vtmp5r - vtmp4r; vout3i = vtmp5i - vtmp4i; data0[0] = (int16_t) vout0r; data0[1] = (int16_t) vout0i; data1[0] = (int16_t) vout1r; data1[1] = (int16_t) vout1i; data2[0] = (int16_t) vout2r; data2[1] = (int16_t) vout2i; data3[0] = (int16_t) vout3r; data3[1] = (int16_t) vout3i; data0 += 2; data1 += 2; data2 += 2; data3 += 2; } int32_t vout0r0 = (int32_t) data0[0]; int32_t vout0i0 = (int32_t) data0[1]; int32_t vout0r1 = (int32_t) data0[2]; int32_t vout0i1 = (int32_t) data0[3]; int32_t vout0r2 = (int32_t) data0[4]; int32_t vout0i2 = (int32_t) data0[5]; int32_t vout1r0 = (int32_t) data1[0]; int32_t vout1i0 = (int32_t) data1[1]; int32_t vout1r1 = (int32_t) data1[2]; int32_t vout1i1 = (int32_t) data1[3]; int32_t vout1r2 = (int32_t) data1[4]; int32_t vout1i2 = (int32_t) data1[5]; int32_t vout2r0 = (int32_t) data2[0]; int32_t vout2i0 = (int32_t) data2[1]; int32_t vout2r1 = (int32_t) data2[2]; int32_t vout2i1 = (int32_t) data2[3]; int32_t vout2r2 = (int32_t) data2[4]; int32_t vout2i2 = (int32_t) data2[5]; int32_t vout3r0 = (int32_t) data3[0]; int32_t vout3i0 = (int32_t) data3[1]; int32_t vout3r1 = (int32_t) data3[2]; int32_t vout3i1 = (int32_t) data3[3]; int32_t vout3r2 = (int32_t) data3[4]; int32_t vout3i2 = (int32_t) data3[5]; // Note 32767 / 4 = 8191. Should be 8192. 
vout0r0 = math_asr_s32(vout0r0 * 8191 + 16384, 15); vout0i0 = math_asr_s32(vout0i0 * 8191 + 16384, 15); vout0r1 = math_asr_s32(vout0r1 * 8191 + 16384, 15); vout0i1 = math_asr_s32(vout0i1 * 8191 + 16384, 15); vout0r2 = math_asr_s32(vout0r2 * 8191 + 16384, 15); vout0i2 = math_asr_s32(vout0i2 * 8191 + 16384, 15); vout1r0 = math_asr_s32(vout1r0 * 8191 + 16384, 15); vout1i0 = math_asr_s32(vout1i0 * 8191 + 16384, 15); vout1r1 = math_asr_s32(vout1r1 * 8191 + 16384, 15); vout1i1 = math_asr_s32(vout1i1 * 8191 + 16384, 15); vout1r2 = math_asr_s32(vout1r2 * 8191 + 16384, 15); vout1i2 = math_asr_s32(vout1i2 * 8191 + 16384, 15); vout2r0 = math_asr_s32(vout2r0 * 8191 + 16384, 15); vout2i0 = math_asr_s32(vout2i0 * 8191 + 16384, 15); vout2r1 = math_asr_s32(vout2r1 * 8191 + 16384, 15); vout2i1 = math_asr_s32(vout2i1 * 8191 + 16384, 15); vout2r2 = math_asr_s32(vout2r2 * 8191 + 16384, 15); vout2i2 = math_asr_s32(vout2i2 * 8191 + 16384, 15); vout3r0 = math_asr_s32(vout3r0 * 8191 + 16384, 15); vout3i0 = math_asr_s32(vout3i0 * 8191 + 16384, 15); vout3r1 = math_asr_s32(vout3r1 * 8191 + 16384, 15); vout3i1 = math_asr_s32(vout3i1 * 8191 + 16384, 15); vout3r2 = math_asr_s32(vout3r2 * 8191 + 16384, 15); vout3i2 = math_asr_s32(vout3i2 * 8191 + 16384, 15); const int32_t vtmp0r0 = math_asr_s32(vout1r0 * vtw1r0 - vout1i0 * vtw1i0 + 16384, 15); const int32_t vtmp0i0 = math_asr_s32(vout1r0 * vtw1i0 + vout1i0 * vtw1r0 + 16384, 15); const int32_t vtmp0r1 = math_asr_s32(vout1r1 * vtw1r1 - vout1i1 * vtw1i1 + 16384, 15); const int32_t vtmp0i1 = math_asr_s32(vout1r1 * vtw1i1 + vout1i1 * vtw1r1 + 16384, 15); const int32_t vtmp0r2 = math_asr_s32(vout1r2 * vtw1r2 - vout1i2 * vtw1i2 + 16384, 15); const int32_t vtmp0i2 = math_asr_s32(vout1r2 * vtw1i2 + vout1i2 * vtw1r2 + 16384, 15); const int32_t vtmp1r0 = math_asr_s32(vout2r0 * vtw2r0 - vout2i0 * vtw2i0 + 16384, 15); const int32_t vtmp1i0 = math_asr_s32(vout2r0 * vtw2i0 + vout2i0 * vtw2r0 + 16384, 15); const int32_t vtmp1r1 = math_asr_s32(vout2r1 * vtw2r1 - vout2i1 * vtw2i1 + 16384, 15); const int32_t vtmp1i1 = math_asr_s32(vout2r1 * vtw2i1 + vout2i1 * vtw2r1 + 16384, 15); const int32_t vtmp1r2 = math_asr_s32(vout2r2 * vtw2r2 - vout2i2 * vtw2i2 + 16384, 15); const int32_t vtmp1i2 = math_asr_s32(vout2r2 * vtw2i2 + vout2i2 * vtw2r2 + 16384, 15); const int32_t vtmp2r0 = math_asr_s32(vout3r0 * vtw3r0 - vout3i0 * vtw3i0 + 16384, 15); const int32_t vtmp2i0 = math_asr_s32(vout3r0 * vtw3i0 + vout3i0 * vtw3r0 + 16384, 15); const int32_t vtmp2r1 = math_asr_s32(vout3r1 * vtw3r1 - vout3i1 * vtw3i1 + 16384, 15); const int32_t vtmp2i1 = math_asr_s32(vout3r1 * vtw3i1 + vout3i1 * vtw3r1 + 16384, 15); const int32_t vtmp2r2 = math_asr_s32(vout3r2 * vtw3r2 - vout3i2 * vtw3i2 + 16384, 15); const int32_t vtmp2i2 = math_asr_s32(vout3r2 * vtw3i2 + vout3i2 * vtw3r2 + 16384, 15); const int32_t vtmp5r0 = vout0r0 - vtmp1r0; const int32_t vtmp5i0 = vout0i0 - vtmp1i0; const int32_t vtmp5r1 = vout0r1 - vtmp1r1; const int32_t vtmp5i1 = vout0i1 - vtmp1i1; const int32_t vtmp5r2 = vout0r2 - vtmp1r2; const int32_t vtmp5i2 = vout0i2 - vtmp1i2; vout0r0 += vtmp1r0; vout0i0 += vtmp1i0; vout0r1 += vtmp1r1; vout0i1 += vtmp1i1; vout0r2 += vtmp1r2; vout0i2 += vtmp1i2; const int32_t vtmp3r0 = vtmp0r0 + vtmp2r0; const int32_t vtmp3i0 = vtmp0i0 + vtmp2i0; const int32_t vtmp3r1 = vtmp0r1 + vtmp2r1; const int32_t vtmp3i1 = vtmp0i1 + vtmp2i1; const int32_t vtmp3r2 = vtmp0r2 + vtmp2r2; const int32_t vtmp3i2 = vtmp0i2 + vtmp2i2; const int32_t vtmp4r0 = vtmp0i0 - vtmp2i0; const int32_t vtmp4i0 = -(vtmp0r0 - vtmp2r0); // swap r,i and 
neg i const int32_t vtmp4r1 = vtmp0i1 - vtmp2i1; const int32_t vtmp4i1 = -(vtmp0r1 - vtmp2r1); // swap r,i and neg i const int32_t vtmp4r2 = vtmp0i2 - vtmp2i2; const int32_t vtmp4i2 = -(vtmp0r2 - vtmp2r2); // swap r,i and neg i vout2r0 = vout0r0 - vtmp3r0; vout2i0 = vout0i0 - vtmp3i0; vout2r1 = vout0r1 - vtmp3r1; vout2i1 = vout0i1 - vtmp3i1; vout2r2 = vout0r2 - vtmp3r2; vout2i2 = vout0i2 - vtmp3i2; vout0r0 += vtmp3r0; vout0i0 += vtmp3i0; vout0r1 += vtmp3r1; vout0i1 += vtmp3i1; vout0r2 += vtmp3r2; vout0i2 += vtmp3i2; vout1r0 = vtmp5r0 + vtmp4r0; vout1i0 = vtmp5i0 + vtmp4i0; vout1r1 = vtmp5r1 + vtmp4r1; vout1i1 = vtmp5i1 + vtmp4i1; vout1r2 = vtmp5r2 + vtmp4r2; vout1i2 = vtmp5i2 + vtmp4i2; vout3r0 = vtmp5r0 - vtmp4r0; vout3i0 = vtmp5i0 - vtmp4i0; vout3r1 = vtmp5r1 - vtmp4r1; vout3i1 = vtmp5i1 - vtmp4i1; vout3r2 = vtmp5r2 - vtmp4r2; vout3i2 = vtmp5i2 - vtmp4i2; data0[0] = (int16_t) vout0r0; data0[1] = (int16_t) vout0i0; data0[2] = (int16_t) vout0r1; data0[3] = (int16_t) vout0i1; data0[4] = (int16_t) vout0r2; data0[5] = (int16_t) vout0i2; data1[0] = (int16_t) vout1r0; data1[1] = (int16_t) vout1i0; data1[2] = (int16_t) vout1r1; data1[3] = (int16_t) vout1i1; data1[4] = (int16_t) vout1r2; data1[5] = (int16_t) vout1i2; data2[0] = (int16_t) vout2r0; data2[1] = (int16_t) vout2i0; data2[2] = (int16_t) vout2r1; data2[3] = (int16_t) vout2i1; data2[4] = (int16_t) vout2r2; data2[5] = (int16_t) vout2i2; data3[0] = (int16_t) vout3r0; data3[1] = (int16_t) vout3i0; data3[2] = (int16_t) vout3r1; data3[3] = (int16_t) vout3i1; data3[4] = (int16_t) vout3r2; data3[5] = (int16_t) vout3i2; data3 += 3 * 2; } while (--batch != 0); }
10,921
39.451852
90
c
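The repeated six-statement pattern in the scalar kernels above is a Q15 complex multiply with round-to-nearest: each cross-product pair is combined and rescaled by (x + 16384) >> 15. Pulled into a helper for clarity (illustrative; the kernels use math_asr_s32 to guarantee the arithmetic shift, written here as plain >>):

#include <stdint.h>

/* (ar + j*ai) * (br + j*bi) in Q15 with rounding. */
static void cmul_q15(int32_t ar, int32_t ai, int32_t br, int32_t bi,
                     int32_t* cr, int32_t* ci) {
  *cr = (ar * br - ai * bi + 16384) >> 15;  /* Re: ar*br - ai*bi */
  *ci = (ar * bi + ai * br + 16384) >> 15;  /* Im: ar*bi + ai*br */
}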
XNNPACK
XNNPACK-master/src/cs16-bfly4/gen/cs16-bfly4-scalar-x1.c
// Auto-generated file. Do not edit! // Template: src/cs16-bfly4/scalar.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/fft.h> void xnn_cs16_bfly4_ukernel__scalar_x1( size_t batch, size_t samples, int16_t* data, const int16_t* twiddle, size_t stride) { assert(batch != 0); assert(samples != 0); assert(samples % (sizeof(int16_t) * 2) == 0); assert(data != NULL); assert(stride != 0); assert(twiddle != NULL); int16_t* data3 = data; do { int16_t* data0 = data3; int16_t* data1 = (int16_t*) ((uintptr_t) data0 + samples); int16_t* data2 = (int16_t*) ((uintptr_t) data1 + samples); data3 = (int16_t*) ((uintptr_t) data2 + samples); const int16_t* tw1 = twiddle; const int16_t* tw2 = twiddle; const int16_t* tw3 = twiddle; tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); size_t s = samples - sizeof(int16_t) * 2; // First sample skips twiddle. // Same code as samples=1 but supports stride { int32_t vout0r = (int32_t) data0[0]; int32_t vout0i = (int32_t) data0[1]; int32_t vout1r = (int32_t) data1[0]; int32_t vout1i = (int32_t) data1[1]; int32_t vout2r = (int32_t) data2[0]; int32_t vout2i = (int32_t) data2[1]; int32_t vout3r = (int32_t) data3[0]; int32_t vout3i = (int32_t) data3[1]; // Note 32767 / 4 = 8191. Should be 8192. vout0r = math_asr_s32(vout0r * 8191 + 16384, 15); vout0i = math_asr_s32(vout0i * 8191 + 16384, 15); vout1r = math_asr_s32(vout1r * 8191 + 16384, 15); vout1i = math_asr_s32(vout1i * 8191 + 16384, 15); vout2r = math_asr_s32(vout2r * 8191 + 16384, 15); vout2i = math_asr_s32(vout2i * 8191 + 16384, 15); vout3r = math_asr_s32(vout3r * 8191 + 16384, 15); vout3i = math_asr_s32(vout3i * 8191 + 16384, 15); const int32_t vtmp5r = vout0r - vout2r; const int32_t vtmp5i = vout0i - vout2i; vout0r += vout2r; vout0i += vout2i; const int32_t vtmp3r = vout1r + vout3r; const int32_t vtmp3i = vout1i + vout3i; const int32_t vtmp4r = vout1i - vout3i; const int32_t vtmp4i = -(vout1r - vout3r); // swap r,i and neg i vout2r = vout0r - vtmp3r; vout2i = vout0i - vtmp3i; vout0r += vtmp3r; vout0i += vtmp3i; vout1r = vtmp5r + vtmp4r; vout1i = vtmp5i + vtmp4i; vout3r = vtmp5r - vtmp4r; vout3i = vtmp5i - vtmp4i; data0[0] = (int16_t) vout0r; data0[1] = (int16_t) vout0i; data1[0] = (int16_t) vout1r; data1[1] = (int16_t) vout1i; data2[0] = (int16_t) vout2r; data2[1] = (int16_t) vout2i; data3[0] = (int16_t) vout3r; data3[1] = (int16_t) vout3i; data0 += 2; data1 += 2; data2 += 2; data3 += 2; } if XNN_UNLIKELY(s != 0) { do { int32_t vout0r = (int32_t) data0[0]; int32_t vout0i = (int32_t) data0[1]; int32_t vout1r = (int32_t) data1[0]; int32_t vout1i = (int32_t) data1[1]; int32_t vout2r = (int32_t) data2[0]; int32_t vout2i = (int32_t) data2[1]; int32_t vout3r = (int32_t) data3[0]; int32_t vout3i = (int32_t) data3[1]; const int32_t vtw1r = (const int32_t) tw1[0]; const int32_t vtw1i = (const int32_t) tw1[1]; const int32_t vtw2r = (const int32_t) tw2[0]; const int32_t vtw2i = (const int32_t) tw2[1]; const int32_t vtw3r = (const int32_t) tw3[0]; const int32_t vtw3i = (const int32_t) tw3[1]; tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); // Note 32767 / 4 = 
8191. Should be 8192. vout0r = math_asr_s32(vout0r * 8191 + 16384, 15); vout0i = math_asr_s32(vout0i * 8191 + 16384, 15); vout1r = math_asr_s32(vout1r * 8191 + 16384, 15); vout1i = math_asr_s32(vout1i * 8191 + 16384, 15); vout2r = math_asr_s32(vout2r * 8191 + 16384, 15); vout2i = math_asr_s32(vout2i * 8191 + 16384, 15); vout3r = math_asr_s32(vout3r * 8191 + 16384, 15); vout3i = math_asr_s32(vout3i * 8191 + 16384, 15); const int32_t vtmp0r = math_asr_s32(vout1r * vtw1r - vout1i * vtw1i + 16384, 15); const int32_t vtmp0i = math_asr_s32(vout1r * vtw1i + vout1i * vtw1r + 16384, 15); const int32_t vtmp1r = math_asr_s32(vout2r * vtw2r - vout2i * vtw2i + 16384, 15); const int32_t vtmp1i = math_asr_s32(vout2r * vtw2i + vout2i * vtw2r + 16384, 15); const int32_t vtmp2r = math_asr_s32(vout3r * vtw3r - vout3i * vtw3i + 16384, 15); const int32_t vtmp2i = math_asr_s32(vout3r * vtw3i + vout3i * vtw3r + 16384, 15); const int32_t vtmp5r = vout0r - vtmp1r; const int32_t vtmp5i = vout0i - vtmp1i; vout0r += vtmp1r; vout0i += vtmp1i; const int32_t vtmp3r = vtmp0r + vtmp2r; const int32_t vtmp3i = vtmp0i + vtmp2i; const int32_t vtmp4r = vtmp0i - vtmp2i; const int32_t vtmp4i = -(vtmp0r - vtmp2r); // swap r,i and neg i vout2r = vout0r - vtmp3r; vout2i = vout0i - vtmp3i; vout0r += vtmp3r; vout0i += vtmp3i; vout1r = vtmp5r + vtmp4r; vout1i = vtmp5i + vtmp4i; vout3r = vtmp5r - vtmp4r; vout3i = vtmp5i - vtmp4i; data0[0] = (int16_t) vout0r; data0[1] = (int16_t) vout0i; data1[0] = (int16_t) vout1r; data1[1] = (int16_t) vout1i; data2[0] = (int16_t) vout2r; data2[1] = (int16_t) vout2i; data3[0] = (int16_t) vout3r; data3[1] = (int16_t) vout3i; data0 += 2; data1 += 2; data2 += 2; data3 += 2; s -= sizeof(int16_t) * 2; } while (s != 0); } } while (--batch != 0); }
6,218
33.743017
89
c
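The "First sample skips twiddle" block in these kernels exists because the k = 0 twiddles are all W^0 = 1 + 0j, so the complex multiplies degenerate to identities (vtmp0 = vout1, vtmp1 = vout2, vtmp2 = vout3); that is also why tw1/tw2/tw3 start one stride past `twiddle` instead of at the table head. Substituting W = 32767 + 0j into the Q15 multiply gives (r * 32767 + 16384) >> 15 ≈ r, exact up to the one-LSB Q15 rounding, so skipping the multiply is both faster and slightly more accurate.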
XNNPACK
XNNPACK-master/src/cs16-bfly4/gen/cs16-bfly4-scalar-x2.c
// Auto-generated file. Do not edit! // Template: src/cs16-bfly4/scalar.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/fft.h> void xnn_cs16_bfly4_ukernel__scalar_x2( size_t batch, size_t samples, int16_t* data, const int16_t* twiddle, size_t stride) { assert(batch != 0); assert(samples != 0); assert(samples % (sizeof(int16_t) * 2) == 0); assert(data != NULL); assert(stride != 0); assert(twiddle != NULL); int16_t* data3 = data; do { int16_t* data0 = data3; int16_t* data1 = (int16_t*) ((uintptr_t) data0 + samples); int16_t* data2 = (int16_t*) ((uintptr_t) data1 + samples); data3 = (int16_t*) ((uintptr_t) data2 + samples); const int16_t* tw1 = twiddle; const int16_t* tw2 = twiddle; const int16_t* tw3 = twiddle; tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); size_t s = samples - sizeof(int16_t) * 2; // First sample skips twiddle. // Same code as samples=1 but supports stride { int32_t vout0r = (int32_t) data0[0]; int32_t vout0i = (int32_t) data0[1]; int32_t vout1r = (int32_t) data1[0]; int32_t vout1i = (int32_t) data1[1]; int32_t vout2r = (int32_t) data2[0]; int32_t vout2i = (int32_t) data2[1]; int32_t vout3r = (int32_t) data3[0]; int32_t vout3i = (int32_t) data3[1]; // Note 32767 / 4 = 8191. Should be 8192. vout0r = math_asr_s32(vout0r * 8191 + 16384, 15); vout0i = math_asr_s32(vout0i * 8191 + 16384, 15); vout1r = math_asr_s32(vout1r * 8191 + 16384, 15); vout1i = math_asr_s32(vout1i * 8191 + 16384, 15); vout2r = math_asr_s32(vout2r * 8191 + 16384, 15); vout2i = math_asr_s32(vout2i * 8191 + 16384, 15); vout3r = math_asr_s32(vout3r * 8191 + 16384, 15); vout3i = math_asr_s32(vout3i * 8191 + 16384, 15); const int32_t vtmp5r = vout0r - vout2r; const int32_t vtmp5i = vout0i - vout2i; vout0r += vout2r; vout0i += vout2i; const int32_t vtmp3r = vout1r + vout3r; const int32_t vtmp3i = vout1i + vout3i; const int32_t vtmp4r = vout1i - vout3i; const int32_t vtmp4i = -(vout1r - vout3r); // swap r,i and neg i vout2r = vout0r - vtmp3r; vout2i = vout0i - vtmp3i; vout0r += vtmp3r; vout0i += vtmp3i; vout1r = vtmp5r + vtmp4r; vout1i = vtmp5i + vtmp4i; vout3r = vtmp5r - vtmp4r; vout3i = vtmp5i - vtmp4i; data0[0] = (int16_t) vout0r; data0[1] = (int16_t) vout0i; data1[0] = (int16_t) vout1r; data1[1] = (int16_t) vout1i; data2[0] = (int16_t) vout2r; data2[1] = (int16_t) vout2i; data3[0] = (int16_t) vout3r; data3[1] = (int16_t) vout3i; data0 += 2; data1 += 2; data2 += 2; data3 += 2; } for (; s >= 2 * sizeof(int16_t) * 2; s -= 2 * sizeof(int16_t) * 2) { int32_t vout0r0 = (int32_t) data0[0]; int32_t vout0i0 = (int32_t) data0[1]; int32_t vout0r1 = (int32_t) data0[2]; int32_t vout0i1 = (int32_t) data0[3]; int32_t vout1r0 = (int32_t) data1[0]; int32_t vout1i0 = (int32_t) data1[1]; int32_t vout1r1 = (int32_t) data1[2]; int32_t vout1i1 = (int32_t) data1[3]; int32_t vout2r0 = (int32_t) data2[0]; int32_t vout2i0 = (int32_t) data2[1]; int32_t vout2r1 = (int32_t) data2[2]; int32_t vout2i1 = (int32_t) data2[3]; int32_t vout3r0 = (int32_t) data3[0]; int32_t vout3i0 = (int32_t) data3[1]; int32_t vout3r1 = (int32_t) data3[2]; int32_t vout3i1 = (int32_t) data3[3]; const int32_t vtw1r0 = (const int32_t) tw1[0]; const int32_t vtw1i0 = (const int32_t) tw1[1]; tw1 = (const 
int16_t*) ((uintptr_t) tw1 + stride); const int32_t vtw1r1 = (const int32_t) tw1[0]; const int32_t vtw1i1 = (const int32_t) tw1[1]; tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); const int32_t vtw2r0 = (const int32_t) tw2[0]; const int32_t vtw2i0 = (const int32_t) tw2[1]; tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); const int32_t vtw2r1 = (const int32_t) tw2[0]; const int32_t vtw2i1 = (const int32_t) tw2[1]; tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); const int32_t vtw3r0 = (const int32_t) tw3[0]; const int32_t vtw3i0 = (const int32_t) tw3[1]; tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); const int32_t vtw3r1 = (const int32_t) tw3[0]; const int32_t vtw3i1 = (const int32_t) tw3[1]; tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); // Note 32767 / 4 = 8191. Should be 8192. vout0r0 = math_asr_s32(vout0r0 * 8191 + 16384, 15); vout0i0 = math_asr_s32(vout0i0 * 8191 + 16384, 15); vout0r1 = math_asr_s32(vout0r1 * 8191 + 16384, 15); vout0i1 = math_asr_s32(vout0i1 * 8191 + 16384, 15); vout1r0 = math_asr_s32(vout1r0 * 8191 + 16384, 15); vout1i0 = math_asr_s32(vout1i0 * 8191 + 16384, 15); vout1r1 = math_asr_s32(vout1r1 * 8191 + 16384, 15); vout1i1 = math_asr_s32(vout1i1 * 8191 + 16384, 15); vout2r0 = math_asr_s32(vout2r0 * 8191 + 16384, 15); vout2i0 = math_asr_s32(vout2i0 * 8191 + 16384, 15); vout2r1 = math_asr_s32(vout2r1 * 8191 + 16384, 15); vout2i1 = math_asr_s32(vout2i1 * 8191 + 16384, 15); vout3r0 = math_asr_s32(vout3r0 * 8191 + 16384, 15); vout3i0 = math_asr_s32(vout3i0 * 8191 + 16384, 15); vout3r1 = math_asr_s32(vout3r1 * 8191 + 16384, 15); vout3i1 = math_asr_s32(vout3i1 * 8191 + 16384, 15); const int32_t vtmp0r0 = math_asr_s32(vout1r0 * vtw1r0 - vout1i0 * vtw1i0 + 16384, 15); const int32_t vtmp0i0 = math_asr_s32(vout1r0 * vtw1i0 + vout1i0 * vtw1r0 + 16384, 15); const int32_t vtmp0r1 = math_asr_s32(vout1r1 * vtw1r1 - vout1i1 * vtw1i1 + 16384, 15); const int32_t vtmp0i1 = math_asr_s32(vout1r1 * vtw1i1 + vout1i1 * vtw1r1 + 16384, 15); const int32_t vtmp1r0 = math_asr_s32(vout2r0 * vtw2r0 - vout2i0 * vtw2i0 + 16384, 15); const int32_t vtmp1i0 = math_asr_s32(vout2r0 * vtw2i0 + vout2i0 * vtw2r0 + 16384, 15); const int32_t vtmp1r1 = math_asr_s32(vout2r1 * vtw2r1 - vout2i1 * vtw2i1 + 16384, 15); const int32_t vtmp1i1 = math_asr_s32(vout2r1 * vtw2i1 + vout2i1 * vtw2r1 + 16384, 15); const int32_t vtmp2r0 = math_asr_s32(vout3r0 * vtw3r0 - vout3i0 * vtw3i0 + 16384, 15); const int32_t vtmp2i0 = math_asr_s32(vout3r0 * vtw3i0 + vout3i0 * vtw3r0 + 16384, 15); const int32_t vtmp2r1 = math_asr_s32(vout3r1 * vtw3r1 - vout3i1 * vtw3i1 + 16384, 15); const int32_t vtmp2i1 = math_asr_s32(vout3r1 * vtw3i1 + vout3i1 * vtw3r1 + 16384, 15); const int32_t vtmp5r0 = vout0r0 - vtmp1r0; const int32_t vtmp5i0 = vout0i0 - vtmp1i0; const int32_t vtmp5r1 = vout0r1 - vtmp1r1; const int32_t vtmp5i1 = vout0i1 - vtmp1i1; vout0r0 += vtmp1r0; vout0i0 += vtmp1i0; vout0r1 += vtmp1r1; vout0i1 += vtmp1i1; const int32_t vtmp3r0 = vtmp0r0 + vtmp2r0; const int32_t vtmp3i0 = vtmp0i0 + vtmp2i0; const int32_t vtmp3r1 = vtmp0r1 + vtmp2r1; const int32_t vtmp3i1 = vtmp0i1 + vtmp2i1; const int32_t vtmp4r0 = vtmp0i0 - vtmp2i0; const int32_t vtmp4i0 = -(vtmp0r0 - vtmp2r0); // swap r,i and neg i const int32_t vtmp4r1 = vtmp0i1 - vtmp2i1; const int32_t vtmp4i1 = -(vtmp0r1 - vtmp2r1); // swap r,i and neg i vout2r0 = vout0r0 - vtmp3r0; vout2i0 = vout0i0 - vtmp3i0; vout2r1 = vout0r1 - vtmp3r1; vout2i1 = vout0i1 - vtmp3i1; vout0r0 += vtmp3r0; vout0i0 += vtmp3i0; vout0r1 += vtmp3r1; vout0i1 += vtmp3i1; vout1r0 = 
vtmp5r0 + vtmp4r0; vout1i0 = vtmp5i0 + vtmp4i0; vout1r1 = vtmp5r1 + vtmp4r1; vout1i1 = vtmp5i1 + vtmp4i1; vout3r0 = vtmp5r0 - vtmp4r0; vout3i0 = vtmp5i0 - vtmp4i0; vout3r1 = vtmp5r1 - vtmp4r1; vout3i1 = vtmp5i1 - vtmp4i1; data0[0] = (int16_t) vout0r0; data0[1] = (int16_t) vout0i0; data0[2] = (int16_t) vout0r1; data0[3] = (int16_t) vout0i1; data0 += 2 * 2; data1[0] = (int16_t) vout1r0; data1[1] = (int16_t) vout1i0; data1[2] = (int16_t) vout1r1; data1[3] = (int16_t) vout1i1; data1 += 2 * 2; data2[0] = (int16_t) vout2r0; data2[1] = (int16_t) vout2i0; data2[2] = (int16_t) vout2r1; data2[3] = (int16_t) vout2i1; data2 += 2 * 2; data3[0] = (int16_t) vout3r0; data3[1] = (int16_t) vout3i0; data3[2] = (int16_t) vout3r1; data3[3] = (int16_t) vout3i1; data3 += 2 * 2; } if XNN_UNLIKELY(s != 0) { do { int32_t vout0r = (int32_t) data0[0]; int32_t vout0i = (int32_t) data0[1]; int32_t vout1r = (int32_t) data1[0]; int32_t vout1i = (int32_t) data1[1]; int32_t vout2r = (int32_t) data2[0]; int32_t vout2i = (int32_t) data2[1]; int32_t vout3r = (int32_t) data3[0]; int32_t vout3i = (int32_t) data3[1]; const int32_t vtw1r = (const int32_t) tw1[0]; const int32_t vtw1i = (const int32_t) tw1[1]; const int32_t vtw2r = (const int32_t) tw2[0]; const int32_t vtw2i = (const int32_t) tw2[1]; const int32_t vtw3r = (const int32_t) tw3[0]; const int32_t vtw3i = (const int32_t) tw3[1]; tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); // Note 32767 / 4 = 8191. Should be 8192. vout0r = math_asr_s32(vout0r * 8191 + 16384, 15); vout0i = math_asr_s32(vout0i * 8191 + 16384, 15); vout1r = math_asr_s32(vout1r * 8191 + 16384, 15); vout1i = math_asr_s32(vout1i * 8191 + 16384, 15); vout2r = math_asr_s32(vout2r * 8191 + 16384, 15); vout2i = math_asr_s32(vout2i * 8191 + 16384, 15); vout3r = math_asr_s32(vout3r * 8191 + 16384, 15); vout3i = math_asr_s32(vout3i * 8191 + 16384, 15); const int32_t vtmp0r = math_asr_s32(vout1r * vtw1r - vout1i * vtw1i + 16384, 15); const int32_t vtmp0i = math_asr_s32(vout1r * vtw1i + vout1i * vtw1r + 16384, 15); const int32_t vtmp1r = math_asr_s32(vout2r * vtw2r - vout2i * vtw2i + 16384, 15); const int32_t vtmp1i = math_asr_s32(vout2r * vtw2i + vout2i * vtw2r + 16384, 15); const int32_t vtmp2r = math_asr_s32(vout3r * vtw3r - vout3i * vtw3i + 16384, 15); const int32_t vtmp2i = math_asr_s32(vout3r * vtw3i + vout3i * vtw3r + 16384, 15); const int32_t vtmp5r = vout0r - vtmp1r; const int32_t vtmp5i = vout0i - vtmp1i; vout0r += vtmp1r; vout0i += vtmp1i; const int32_t vtmp3r = vtmp0r + vtmp2r; const int32_t vtmp3i = vtmp0i + vtmp2i; const int32_t vtmp4r = vtmp0i - vtmp2i; const int32_t vtmp4i = -(vtmp0r - vtmp2r); // swap r,i and neg i vout2r = vout0r - vtmp3r; vout2i = vout0i - vtmp3i; vout0r += vtmp3r; vout0i += vtmp3i; vout1r = vtmp5r + vtmp4r; vout1i = vtmp5i + vtmp4i; vout3r = vtmp5r - vtmp4r; vout3i = vtmp5i - vtmp4i; data0[0] = (int16_t) vout0r; data0[1] = (int16_t) vout0i; data1[0] = (int16_t) vout1r; data1[1] = (int16_t) vout1i; data2[0] = (int16_t) vout2r; data2[1] = (int16_t) vout2i; data3[0] = (int16_t) vout3r; data3[1] = (int16_t) vout3i; data0 += 2; data1 += 2; data2 += 2; data3 += 2; s -= sizeof(int16_t) * 2; } while (s != 0); } } while (--batch != 0); }
12,025
38.953488
92
c
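The generated _x2 and _x4 scalar variants differ from _x1 only in unrolling: after the twiddle-free first butterfly, a main loop consumes 2 or 4 complex samples per iteration and a one-sample tail loop (the _x1 body) handles whatever remains. Skeleton of the structure, with the _x4 constants shown as the illustrative case:

size_t s = samples - sizeof(int16_t) * 2;  /* first sample handled separately */
for (; s >= 4 * sizeof(int16_t) * 2; s -= 4 * sizeof(int16_t) * 2) {
  /* unrolled body: 4 butterflies, 4 strided twiddle loads per factor */
}
if XNN_UNLIKELY(s != 0) {
  do {
    /* single-butterfly body, one complex sample per iteration */
    s -= sizeof(int16_t) * 2;
  } while (s != 0);
}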
XNNPACK
XNNPACK-master/src/cs16-bfly4/gen/cs16-bfly4-scalar-x4.c
// Auto-generated file. Do not edit! // Template: src/cs16-bfly4/scalar.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/fft.h> void xnn_cs16_bfly4_ukernel__scalar_x4( size_t batch, size_t samples, int16_t* data, const int16_t* twiddle, size_t stride) { assert(batch != 0); assert(samples != 0); assert(samples % (sizeof(int16_t) * 2) == 0); assert(data != NULL); assert(stride != 0); assert(twiddle != NULL); int16_t* data3 = data; do { int16_t* data0 = data3; int16_t* data1 = (int16_t*) ((uintptr_t) data0 + samples); int16_t* data2 = (int16_t*) ((uintptr_t) data1 + samples); data3 = (int16_t*) ((uintptr_t) data2 + samples); const int16_t* tw1 = twiddle; const int16_t* tw2 = twiddle; const int16_t* tw3 = twiddle; tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); size_t s = samples - sizeof(int16_t) * 2; // First sample skips twiddle. // Same code as samples=1 but supports stride { int32_t vout0r = (int32_t) data0[0]; int32_t vout0i = (int32_t) data0[1]; int32_t vout1r = (int32_t) data1[0]; int32_t vout1i = (int32_t) data1[1]; int32_t vout2r = (int32_t) data2[0]; int32_t vout2i = (int32_t) data2[1]; int32_t vout3r = (int32_t) data3[0]; int32_t vout3i = (int32_t) data3[1]; // Note 32767 / 4 = 8191. Should be 8192. vout0r = math_asr_s32(vout0r * 8191 + 16384, 15); vout0i = math_asr_s32(vout0i * 8191 + 16384, 15); vout1r = math_asr_s32(vout1r * 8191 + 16384, 15); vout1i = math_asr_s32(vout1i * 8191 + 16384, 15); vout2r = math_asr_s32(vout2r * 8191 + 16384, 15); vout2i = math_asr_s32(vout2i * 8191 + 16384, 15); vout3r = math_asr_s32(vout3r * 8191 + 16384, 15); vout3i = math_asr_s32(vout3i * 8191 + 16384, 15); const int32_t vtmp5r = vout0r - vout2r; const int32_t vtmp5i = vout0i - vout2i; vout0r += vout2r; vout0i += vout2i; const int32_t vtmp3r = vout1r + vout3r; const int32_t vtmp3i = vout1i + vout3i; const int32_t vtmp4r = vout1i - vout3i; const int32_t vtmp4i = -(vout1r - vout3r); // swap r,i and neg i vout2r = vout0r - vtmp3r; vout2i = vout0i - vtmp3i; vout0r += vtmp3r; vout0i += vtmp3i; vout1r = vtmp5r + vtmp4r; vout1i = vtmp5i + vtmp4i; vout3r = vtmp5r - vtmp4r; vout3i = vtmp5i - vtmp4i; data0[0] = (int16_t) vout0r; data0[1] = (int16_t) vout0i; data1[0] = (int16_t) vout1r; data1[1] = (int16_t) vout1i; data2[0] = (int16_t) vout2r; data2[1] = (int16_t) vout2i; data3[0] = (int16_t) vout3r; data3[1] = (int16_t) vout3i; data0 += 2; data1 += 2; data2 += 2; data3 += 2; } for (; s >= 4 * sizeof(int16_t) * 2; s -= 4 * sizeof(int16_t) * 2) { int32_t vout0r0 = (int32_t) data0[0]; int32_t vout0i0 = (int32_t) data0[1]; int32_t vout0r1 = (int32_t) data0[2]; int32_t vout0i1 = (int32_t) data0[3]; int32_t vout0r2 = (int32_t) data0[4]; int32_t vout0i2 = (int32_t) data0[5]; int32_t vout0r3 = (int32_t) data0[6]; int32_t vout0i3 = (int32_t) data0[7]; int32_t vout1r0 = (int32_t) data1[0]; int32_t vout1i0 = (int32_t) data1[1]; int32_t vout1r1 = (int32_t) data1[2]; int32_t vout1i1 = (int32_t) data1[3]; int32_t vout1r2 = (int32_t) data1[4]; int32_t vout1i2 = (int32_t) data1[5]; int32_t vout1r3 = (int32_t) data1[6]; int32_t vout1i3 = (int32_t) data1[7]; int32_t vout2r0 = (int32_t) data2[0]; int32_t vout2i0 = (int32_t) data2[1]; int32_t vout2r1 = (int32_t) 
data2[2]; int32_t vout2i1 = (int32_t) data2[3]; int32_t vout2r2 = (int32_t) data2[4]; int32_t vout2i2 = (int32_t) data2[5]; int32_t vout2r3 = (int32_t) data2[6]; int32_t vout2i3 = (int32_t) data2[7]; int32_t vout3r0 = (int32_t) data3[0]; int32_t vout3i0 = (int32_t) data3[1]; int32_t vout3r1 = (int32_t) data3[2]; int32_t vout3i1 = (int32_t) data3[3]; int32_t vout3r2 = (int32_t) data3[4]; int32_t vout3i2 = (int32_t) data3[5]; int32_t vout3r3 = (int32_t) data3[6]; int32_t vout3i3 = (int32_t) data3[7]; const int32_t vtw1r0 = (const int32_t) tw1[0]; const int32_t vtw1i0 = (const int32_t) tw1[1]; tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); const int32_t vtw1r1 = (const int32_t) tw1[0]; const int32_t vtw1i1 = (const int32_t) tw1[1]; tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); const int32_t vtw1r2 = (const int32_t) tw1[0]; const int32_t vtw1i2 = (const int32_t) tw1[1]; tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); const int32_t vtw1r3 = (const int32_t) tw1[0]; const int32_t vtw1i3 = (const int32_t) tw1[1]; tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); const int32_t vtw2r0 = (const int32_t) tw2[0]; const int32_t vtw2i0 = (const int32_t) tw2[1]; tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); const int32_t vtw2r1 = (const int32_t) tw2[0]; const int32_t vtw2i1 = (const int32_t) tw2[1]; tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); const int32_t vtw2r2 = (const int32_t) tw2[0]; const int32_t vtw2i2 = (const int32_t) tw2[1]; tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); const int32_t vtw2r3 = (const int32_t) tw2[0]; const int32_t vtw2i3 = (const int32_t) tw2[1]; tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); const int32_t vtw3r0 = (const int32_t) tw3[0]; const int32_t vtw3i0 = (const int32_t) tw3[1]; tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); const int32_t vtw3r1 = (const int32_t) tw3[0]; const int32_t vtw3i1 = (const int32_t) tw3[1]; tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); const int32_t vtw3r2 = (const int32_t) tw3[0]; const int32_t vtw3i2 = (const int32_t) tw3[1]; tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); const int32_t vtw3r3 = (const int32_t) tw3[0]; const int32_t vtw3i3 = (const int32_t) tw3[1]; tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); // Note 32767 / 4 = 8191. Should be 8192. 
vout0r0 = math_asr_s32(vout0r0 * 8191 + 16384, 15); vout0i0 = math_asr_s32(vout0i0 * 8191 + 16384, 15); vout0r1 = math_asr_s32(vout0r1 * 8191 + 16384, 15); vout0i1 = math_asr_s32(vout0i1 * 8191 + 16384, 15); vout0r2 = math_asr_s32(vout0r2 * 8191 + 16384, 15); vout0i2 = math_asr_s32(vout0i2 * 8191 + 16384, 15); vout0r3 = math_asr_s32(vout0r3 * 8191 + 16384, 15); vout0i3 = math_asr_s32(vout0i3 * 8191 + 16384, 15); vout1r0 = math_asr_s32(vout1r0 * 8191 + 16384, 15); vout1i0 = math_asr_s32(vout1i0 * 8191 + 16384, 15); vout1r1 = math_asr_s32(vout1r1 * 8191 + 16384, 15); vout1i1 = math_asr_s32(vout1i1 * 8191 + 16384, 15); vout1r2 = math_asr_s32(vout1r2 * 8191 + 16384, 15); vout1i2 = math_asr_s32(vout1i2 * 8191 + 16384, 15); vout1r3 = math_asr_s32(vout1r3 * 8191 + 16384, 15); vout1i3 = math_asr_s32(vout1i3 * 8191 + 16384, 15); vout2r0 = math_asr_s32(vout2r0 * 8191 + 16384, 15); vout2i0 = math_asr_s32(vout2i0 * 8191 + 16384, 15); vout2r1 = math_asr_s32(vout2r1 * 8191 + 16384, 15); vout2i1 = math_asr_s32(vout2i1 * 8191 + 16384, 15); vout2r2 = math_asr_s32(vout2r2 * 8191 + 16384, 15); vout2i2 = math_asr_s32(vout2i2 * 8191 + 16384, 15); vout2r3 = math_asr_s32(vout2r3 * 8191 + 16384, 15); vout2i3 = math_asr_s32(vout2i3 * 8191 + 16384, 15); vout3r0 = math_asr_s32(vout3r0 * 8191 + 16384, 15); vout3i0 = math_asr_s32(vout3i0 * 8191 + 16384, 15); vout3r1 = math_asr_s32(vout3r1 * 8191 + 16384, 15); vout3i1 = math_asr_s32(vout3i1 * 8191 + 16384, 15); vout3r2 = math_asr_s32(vout3r2 * 8191 + 16384, 15); vout3i2 = math_asr_s32(vout3i2 * 8191 + 16384, 15); vout3r3 = math_asr_s32(vout3r3 * 8191 + 16384, 15); vout3i3 = math_asr_s32(vout3i3 * 8191 + 16384, 15); const int32_t vtmp0r0 = math_asr_s32(vout1r0 * vtw1r0 - vout1i0 * vtw1i0 + 16384, 15); const int32_t vtmp0i0 = math_asr_s32(vout1r0 * vtw1i0 + vout1i0 * vtw1r0 + 16384, 15); const int32_t vtmp0r1 = math_asr_s32(vout1r1 * vtw1r1 - vout1i1 * vtw1i1 + 16384, 15); const int32_t vtmp0i1 = math_asr_s32(vout1r1 * vtw1i1 + vout1i1 * vtw1r1 + 16384, 15); const int32_t vtmp0r2 = math_asr_s32(vout1r2 * vtw1r2 - vout1i2 * vtw1i2 + 16384, 15); const int32_t vtmp0i2 = math_asr_s32(vout1r2 * vtw1i2 + vout1i2 * vtw1r2 + 16384, 15); const int32_t vtmp0r3 = math_asr_s32(vout1r3 * vtw1r3 - vout1i3 * vtw1i3 + 16384, 15); const int32_t vtmp0i3 = math_asr_s32(vout1r3 * vtw1i3 + vout1i3 * vtw1r3 + 16384, 15); const int32_t vtmp1r0 = math_asr_s32(vout2r0 * vtw2r0 - vout2i0 * vtw2i0 + 16384, 15); const int32_t vtmp1i0 = math_asr_s32(vout2r0 * vtw2i0 + vout2i0 * vtw2r0 + 16384, 15); const int32_t vtmp1r1 = math_asr_s32(vout2r1 * vtw2r1 - vout2i1 * vtw2i1 + 16384, 15); const int32_t vtmp1i1 = math_asr_s32(vout2r1 * vtw2i1 + vout2i1 * vtw2r1 + 16384, 15); const int32_t vtmp1r2 = math_asr_s32(vout2r2 * vtw2r2 - vout2i2 * vtw2i2 + 16384, 15); const int32_t vtmp1i2 = math_asr_s32(vout2r2 * vtw2i2 + vout2i2 * vtw2r2 + 16384, 15); const int32_t vtmp1r3 = math_asr_s32(vout2r3 * vtw2r3 - vout2i3 * vtw2i3 + 16384, 15); const int32_t vtmp1i3 = math_asr_s32(vout2r3 * vtw2i3 + vout2i3 * vtw2r3 + 16384, 15); const int32_t vtmp2r0 = math_asr_s32(vout3r0 * vtw3r0 - vout3i0 * vtw3i0 + 16384, 15); const int32_t vtmp2i0 = math_asr_s32(vout3r0 * vtw3i0 + vout3i0 * vtw3r0 + 16384, 15); const int32_t vtmp2r1 = math_asr_s32(vout3r1 * vtw3r1 - vout3i1 * vtw3i1 + 16384, 15); const int32_t vtmp2i1 = math_asr_s32(vout3r1 * vtw3i1 + vout3i1 * vtw3r1 + 16384, 15); const int32_t vtmp2r2 = math_asr_s32(vout3r2 * vtw3r2 - vout3i2 * vtw3i2 + 16384, 15); const int32_t vtmp2i2 = math_asr_s32(vout3r2 * vtw3i2 + vout3i2 
* vtw3r2 + 16384, 15); const int32_t vtmp2r3 = math_asr_s32(vout3r3 * vtw3r3 - vout3i3 * vtw3i3 + 16384, 15); const int32_t vtmp2i3 = math_asr_s32(vout3r3 * vtw3i3 + vout3i3 * vtw3r3 + 16384, 15); const int32_t vtmp5r0 = vout0r0 - vtmp1r0; const int32_t vtmp5i0 = vout0i0 - vtmp1i0; const int32_t vtmp5r1 = vout0r1 - vtmp1r1; const int32_t vtmp5i1 = vout0i1 - vtmp1i1; const int32_t vtmp5r2 = vout0r2 - vtmp1r2; const int32_t vtmp5i2 = vout0i2 - vtmp1i2; const int32_t vtmp5r3 = vout0r3 - vtmp1r3; const int32_t vtmp5i3 = vout0i3 - vtmp1i3; vout0r0 += vtmp1r0; vout0i0 += vtmp1i0; vout0r1 += vtmp1r1; vout0i1 += vtmp1i1; vout0r2 += vtmp1r2; vout0i2 += vtmp1i2; vout0r3 += vtmp1r3; vout0i3 += vtmp1i3; const int32_t vtmp3r0 = vtmp0r0 + vtmp2r0; const int32_t vtmp3i0 = vtmp0i0 + vtmp2i0; const int32_t vtmp3r1 = vtmp0r1 + vtmp2r1; const int32_t vtmp3i1 = vtmp0i1 + vtmp2i1; const int32_t vtmp3r2 = vtmp0r2 + vtmp2r2; const int32_t vtmp3i2 = vtmp0i2 + vtmp2i2; const int32_t vtmp3r3 = vtmp0r3 + vtmp2r3; const int32_t vtmp3i3 = vtmp0i3 + vtmp2i3; const int32_t vtmp4r0 = vtmp0i0 - vtmp2i0; const int32_t vtmp4i0 = -(vtmp0r0 - vtmp2r0); // swap r,i and neg i const int32_t vtmp4r1 = vtmp0i1 - vtmp2i1; const int32_t vtmp4i1 = -(vtmp0r1 - vtmp2r1); // swap r,i and neg i const int32_t vtmp4r2 = vtmp0i2 - vtmp2i2; const int32_t vtmp4i2 = -(vtmp0r2 - vtmp2r2); // swap r,i and neg i const int32_t vtmp4r3 = vtmp0i3 - vtmp2i3; const int32_t vtmp4i3 = -(vtmp0r3 - vtmp2r3); // swap r,i and neg i vout2r0 = vout0r0 - vtmp3r0; vout2i0 = vout0i0 - vtmp3i0; vout2r1 = vout0r1 - vtmp3r1; vout2i1 = vout0i1 - vtmp3i1; vout2r2 = vout0r2 - vtmp3r2; vout2i2 = vout0i2 - vtmp3i2; vout2r3 = vout0r3 - vtmp3r3; vout2i3 = vout0i3 - vtmp3i3; vout0r0 += vtmp3r0; vout0i0 += vtmp3i0; vout0r1 += vtmp3r1; vout0i1 += vtmp3i1; vout0r2 += vtmp3r2; vout0i2 += vtmp3i2; vout0r3 += vtmp3r3; vout0i3 += vtmp3i3; vout1r0 = vtmp5r0 + vtmp4r0; vout1i0 = vtmp5i0 + vtmp4i0; vout1r1 = vtmp5r1 + vtmp4r1; vout1i1 = vtmp5i1 + vtmp4i1; vout1r2 = vtmp5r2 + vtmp4r2; vout1i2 = vtmp5i2 + vtmp4i2; vout1r3 = vtmp5r3 + vtmp4r3; vout1i3 = vtmp5i3 + vtmp4i3; vout3r0 = vtmp5r0 - vtmp4r0; vout3i0 = vtmp5i0 - vtmp4i0; vout3r1 = vtmp5r1 - vtmp4r1; vout3i1 = vtmp5i1 - vtmp4i1; vout3r2 = vtmp5r2 - vtmp4r2; vout3i2 = vtmp5i2 - vtmp4i2; vout3r3 = vtmp5r3 - vtmp4r3; vout3i3 = vtmp5i3 - vtmp4i3; data0[0] = (int16_t) vout0r0; data0[1] = (int16_t) vout0i0; data0[2] = (int16_t) vout0r1; data0[3] = (int16_t) vout0i1; data0[4] = (int16_t) vout0r2; data0[5] = (int16_t) vout0i2; data0[6] = (int16_t) vout0r3; data0[7] = (int16_t) vout0i3; data0 += 4 * 2; data1[0] = (int16_t) vout1r0; data1[1] = (int16_t) vout1i0; data1[2] = (int16_t) vout1r1; data1[3] = (int16_t) vout1i1; data1[4] = (int16_t) vout1r2; data1[5] = (int16_t) vout1i2; data1[6] = (int16_t) vout1r3; data1[7] = (int16_t) vout1i3; data1 += 4 * 2; data2[0] = (int16_t) vout2r0; data2[1] = (int16_t) vout2i0; data2[2] = (int16_t) vout2r1; data2[3] = (int16_t) vout2i1; data2[4] = (int16_t) vout2r2; data2[5] = (int16_t) vout2i2; data2[6] = (int16_t) vout2r3; data2[7] = (int16_t) vout2i3; data2 += 4 * 2; data3[0] = (int16_t) vout3r0; data3[1] = (int16_t) vout3i0; data3[2] = (int16_t) vout3r1; data3[3] = (int16_t) vout3i1; data3[4] = (int16_t) vout3r2; data3[5] = (int16_t) vout3i2; data3[6] = (int16_t) vout3r3; data3[7] = (int16_t) vout3i3; data3 += 4 * 2; } if XNN_UNLIKELY(s != 0) { do { int32_t vout0r = (int32_t) data0[0]; int32_t vout0i = (int32_t) data0[1]; int32_t vout1r = (int32_t) data1[0]; int32_t vout1i = (int32_t) data1[1]; 
int32_t vout2r = (int32_t) data2[0]; int32_t vout2i = (int32_t) data2[1]; int32_t vout3r = (int32_t) data3[0]; int32_t vout3i = (int32_t) data3[1]; const int32_t vtw1r = (const int32_t) tw1[0]; const int32_t vtw1i = (const int32_t) tw1[1]; const int32_t vtw2r = (const int32_t) tw2[0]; const int32_t vtw2i = (const int32_t) tw2[1]; const int32_t vtw3r = (const int32_t) tw3[0]; const int32_t vtw3i = (const int32_t) tw3[1]; tw1 = (const int16_t*) ((uintptr_t) tw1 + stride); tw2 = (const int16_t*) ((uintptr_t) tw2 + stride * 2); tw3 = (const int16_t*) ((uintptr_t) tw3 + stride * 3); // Note 32767 / 4 = 8191. Should be 8192. vout0r = math_asr_s32(vout0r * 8191 + 16384, 15); vout0i = math_asr_s32(vout0i * 8191 + 16384, 15); vout1r = math_asr_s32(vout1r * 8191 + 16384, 15); vout1i = math_asr_s32(vout1i * 8191 + 16384, 15); vout2r = math_asr_s32(vout2r * 8191 + 16384, 15); vout2i = math_asr_s32(vout2i * 8191 + 16384, 15); vout3r = math_asr_s32(vout3r * 8191 + 16384, 15); vout3i = math_asr_s32(vout3i * 8191 + 16384, 15); const int32_t vtmp0r = math_asr_s32(vout1r * vtw1r - vout1i * vtw1i + 16384, 15); const int32_t vtmp0i = math_asr_s32(vout1r * vtw1i + vout1i * vtw1r + 16384, 15); const int32_t vtmp1r = math_asr_s32(vout2r * vtw2r - vout2i * vtw2i + 16384, 15); const int32_t vtmp1i = math_asr_s32(vout2r * vtw2i + vout2i * vtw2r + 16384, 15); const int32_t vtmp2r = math_asr_s32(vout3r * vtw3r - vout3i * vtw3i + 16384, 15); const int32_t vtmp2i = math_asr_s32(vout3r * vtw3i + vout3i * vtw3r + 16384, 15); const int32_t vtmp5r = vout0r - vtmp1r; const int32_t vtmp5i = vout0i - vtmp1i; vout0r += vtmp1r; vout0i += vtmp1i; const int32_t vtmp3r = vtmp0r + vtmp2r; const int32_t vtmp3i = vtmp0i + vtmp2i; const int32_t vtmp4r = vtmp0i - vtmp2i; const int32_t vtmp4i = -(vtmp0r - vtmp2r); // swap r,i and neg i vout2r = vout0r - vtmp3r; vout2i = vout0i - vtmp3i; vout0r += vtmp3r; vout0i += vtmp3i; vout1r = vtmp5r + vtmp4r; vout1i = vtmp5i + vtmp4i; vout3r = vtmp5r - vtmp4r; vout3i = vtmp5i - vtmp4i; data0[0] = (int16_t) vout0r; data0[1] = (int16_t) vout0i; data1[0] = (int16_t) vout1r; data1[1] = (int16_t) vout1i; data2[0] = (int16_t) vout2r; data2[1] = (int16_t) vout2i; data3[0] = (int16_t) vout3r; data3[1] = (int16_t) vout3i; data0 += 2; data1 += 2; data2 += 2; data3 += 2; s -= sizeof(int16_t) * 2; } while (s != 0); } } while (--batch != 0); }
17,611
41.851582
92
c
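The radix-4 pass above pre-scales every sample with the multiply-round-shift idiom math_asr_s32(x * 8191 + 16384, 15), where 8191 is 32767/4 rounded down (the in-file note itself remarks that an exact quarter would be 8192) and the +16384 term makes the 15-bit shift round to nearest. A minimal standalone sketch of that idiom; the helper names asr_s32 and q15_scale_quarter are ours, not part of XNNPACK:

#include <assert.h>
#include <stdint.h>

// Portable arithmetic shift right, mirroring what XNNPACK's math_asr_s32 provides.
static inline int32_t asr_s32(int32_t x, uint32_t n) {
  return x >= 0 ? x >> n : ~(~x >> n);
}

// Hypothetical helper: scale a Q15 sample by ~1/4 with round-to-nearest,
// exactly as the kernel above does inline for every vout term.
static inline int32_t q15_scale_quarter(int32_t x) {
  return asr_s32(x * 8191 + 16384, 15);
}

int main(void) {
  assert(q15_scale_quarter(32767) == 8191);    // full scale maps to ~1/4
  assert(q15_scale_quarter(-32768) == -8191);  // (-32768 * 8191 + 16384) >> 15
  assert(q15_scale_quarter(4) == 1);           // 4 * 8191 + 16384 = 49148 -> 1
  return 0;
}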
XNNPACK
XNNPACK-master/src/cs16-fftr/cs16-fftr-neon-x4.c
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/fft.h> #include <arm_neon.h> void xnn_cs16_fftr_ukernel__neon_x4( size_t samples, int16_t* data, const int16_t* twiddle) { assert(samples != 0); assert(samples % 8 == 0); assert(data != NULL); assert(twiddle != NULL); int16_t* dl = data; int16_t* dr = data + samples * 2; int32_t vdcr = (int32_t) dl[0]; int32_t vdci = (int32_t) dl[1]; vdcr = math_asr_s32(vdcr * 16383 + 16384, 15); vdci = math_asr_s32(vdci * 16383 + 16384, 15); dl[0] = vdcr + vdci; dl[1] = 0; dl += 2; dr[0] = vdcr - vdci; dr[1] = 0; const int16x4_t vdiv2 = vdup_n_s16(16383); do { dr -= 8; const int16x4x2_t vil = vld2_s16(dl); const int16x4x2_t vir = vld2_s16(dr); const int16x4x2_t vtw = vld2_s16(twiddle); twiddle += 8; int16x4_t virr = vrev64_s16(vir.val[0]); int16x4_t viri = vrev64_s16(vir.val[1]); const int16x4_t vilr = vqrdmulh_s16(vil.val[0], vdiv2); const int16x4_t vili = vqrdmulh_s16(vil.val[1], vdiv2); virr = vqrdmulh_s16(virr, vdiv2); viri = vqrdmulh_s16(viri, vdiv2); const int32x4_t vacc1r = vaddl_s16(vilr, virr); const int32x4_t vacc1i = vsubl_s16(vili, viri); const int16x4_t vacc2r = vsub_s16(vilr, virr); const int16x4_t vacc2i = vadd_s16(vili, viri); int32x4_t vaccr = vmull_s16(vacc2r, vtw.val[0]); int32x4_t vacci = vmull_s16(vacc2r, vtw.val[1]); vaccr = vmlsl_s16(vaccr, vacc2i, vtw.val[1]); vacci = vmlal_s16(vacci, vacc2i, vtw.val[0]); vaccr = vrshrq_n_s32(vaccr, 15); vacci = vrshrq_n_s32(vacci, 15); const int32x4_t vacclr = vhaddq_s32(vacc1r, vaccr); const int32x4_t vaccli = vhaddq_s32(vacc1i, vacci); const int32x4_t vaccrr = vhsubq_s32(vacc1r, vaccr); const int32x4_t vaccri = vhsubq_s32(vacci, vacc1i); int16x4x2_t voutl; int16x4x2_t voutr; voutl.val[0] = vmovn_s32(vacclr); voutl.val[1] = vmovn_s32(vaccli); voutr.val[0] = vrev64_s16(vmovn_s32(vaccrr)); voutr.val[1] = vrev64_s16(vmovn_s32(vaccri)); vst2_s16(dl, voutl); vst2_s16(dr, voutr); dl += 8; samples -= 8; } while(samples != 0); }
2,367
26.534884
72
c
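In the NEON kernel, the halving step is vqrdmulh_s16 against a vector of 16383, which computes round(2 * v * 16383 / 2^16); that is bit-for-bit the scalar kernels' (v * 16383 + 16384) >> 15. A small self-check sketch under that reading, exhaustive over all int16 values on one NEON lane:

#include <assert.h>
#include <stdint.h>
#include <arm_neon.h>

// Scalar model of the "divide by 2" step shared by the scalar and NEON fftr
// kernels: round(v * 16383 / 2^15), written with a portable arithmetic shift.
static inline int16_t halve_q15_ref(int16_t v) {
  const int32_t p = (int32_t) v * 16383 + 16384;
  return (int16_t) (p >= 0 ? p >> 15 : ~(~p >> 15));
}

int main(void) {
  const int16x4_t vdiv2 = vdup_n_s16(16383);
  for (int32_t x = INT16_MIN; x <= INT16_MAX; x++) {
    const int16_t got = vget_lane_s16(vqrdmulh_s16(vdup_n_s16((int16_t) x), vdiv2), 0);
    assert(got == halve_q15_ref((int16_t) x));
  }
  return 0;
}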
XNNPACK
XNNPACK-master/src/cs16-fftr/gen/cs16-fftr-scalar-x1.c
// Auto-generated file. Do not edit! // Template: src/cs16-fftr/scalar.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/fft.h> void xnn_cs16_fftr_ukernel__scalar_x1( size_t samples, int16_t* data, const int16_t* twiddle) { assert(samples != 0); assert(samples % 2 == 0); assert(data != NULL); assert(twiddle != NULL); int16_t* dl = data; int16_t* dr = data + samples * 2; int32_t vdcr = (int32_t) dl[0]; int32_t vdci = (int32_t) dl[1]; vdcr = math_asr_s32(vdcr * 16383 + 16384, 15); vdci = math_asr_s32(vdci * 16383 + 16384, 15); dl[0] = vdcr + vdci; dl[1] = 0; dl += 2; dr[0] = vdcr - vdci; dr[1] = 0; samples >>= 1; if XNN_UNLIKELY(samples != 0) { do { dr -= 2; int32_t vilr = (int32_t) dl[0]; int32_t vili = (int32_t) dl[1]; int32_t virr = (int32_t) dr[0]; int32_t viri = (int32_t) dr[1]; const int32_t vtwr = twiddle[0]; const int32_t vtwi = twiddle[1]; twiddle += 2; vilr = math_asr_s32(vilr * 16383 + 16384, 15); vili = math_asr_s32(vili * 16383 + 16384, 15); virr = math_asr_s32(virr * 16383 + 16384, 15); viri = math_asr_s32(viri * 16383 + 16384, 15); const int32_t vacc1r = vilr + virr; const int32_t vacc1i = vili - viri; const int32_t vacc2r = vilr - virr; const int32_t vacc2i = vili + viri; const int32_t vaccr = math_asr_s32(vacc2r * vtwr - vacc2i * vtwi + 16384, 15); const int32_t vacci = math_asr_s32(vacc2r * vtwi + vacc2i * vtwr + 16384, 15); dl[0] = math_asr_s32(vacc1r + vaccr, 1); dl[1] = math_asr_s32(vacc1i + vacci, 1); dr[0] = math_asr_s32(vacc1r - vaccr, 1); dr[1] = math_asr_s32(vacci - vacc1i, 1); dl += 2; } while (--samples != 0); } }
2,029
25.710526
84
c
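For reference, the integer butterfly in this scalar kernel follows the standard real-FFT post-processing. Under our reading, the stored twiddle already folds in the usual -i * e^(-2*pi*i*k/N) factor, and the 16383 multiplies implement the two divisions by 2. A float model of one butterfly (names ours):

#include <complex.h>

// zl: bin k read from the left end, zr: mirrored bin read from the right end,
// w: packed complex twiddle. out_l/out_r are written back in place by the kernel.
static void fftr_butterfly_ref(float complex zl, float complex zr, float complex w,
                               float complex* out_l, float complex* out_r) {
  const float complex sum  = zl + conjf(zr);  // vacc1: vilr + virr, vili - viri
  const float complex diff = zl - conjf(zr);  // vacc2: vilr - virr, vili + viri
  const float complex t = diff * w;           // vaccr/vacci: Q15 complex multiply
  *out_l = 0.5f * (sum + t);                  // dl[0], dl[1]
  *out_r = 0.5f * conjf(sum - t);             // dr[0], dr[1] (note the conjugate)
}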
XNNPACK
XNNPACK-master/src/cs16-fftr/gen/cs16-fftr-scalar-x2.c
// Auto-generated file. Do not edit! // Template: src/cs16-fftr/scalar.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/fft.h> void xnn_cs16_fftr_ukernel__scalar_x2( size_t samples, int16_t* data, const int16_t* twiddle) { assert(samples != 0); assert(samples % 2 == 0); assert(data != NULL); assert(twiddle != NULL); int16_t* dl = data; int16_t* dr = data + samples * 2; int32_t vdcr = (int32_t) dl[0]; int32_t vdci = (int32_t) dl[1]; vdcr = math_asr_s32(vdcr * 16383 + 16384, 15); vdci = math_asr_s32(vdci * 16383 + 16384, 15); dl[0] = vdcr + vdci; dl[1] = 0; dl += 2; dr[0] = vdcr - vdci; dr[1] = 0; samples >>= 1; for (; samples >= 2; samples -= 2) { dr -= 2 * 2; int32_t vilr0 = (int32_t) dl[0]; int32_t vili0 = (int32_t) dl[1]; int32_t vilr1 = (int32_t) dl[2]; int32_t vili1 = (int32_t) dl[3]; int32_t virr0 = (int32_t) dr[2]; int32_t viri0 = (int32_t) dr[3]; int32_t virr1 = (int32_t) dr[0]; int32_t viri1 = (int32_t) dr[1]; const int32_t vtwr0 = twiddle[0]; const int32_t vtwi0 = twiddle[1]; const int32_t vtwr1 = twiddle[2]; const int32_t vtwi1 = twiddle[3]; twiddle += 2 * 2; vilr0 = math_asr_s32(vilr0 * 16383 + 16384, 15); vili0 = math_asr_s32(vili0 * 16383 + 16384, 15); virr0 = math_asr_s32(virr0 * 16383 + 16384, 15); viri0 = math_asr_s32(viri0 * 16383 + 16384, 15); vilr1 = math_asr_s32(vilr1 * 16383 + 16384, 15); vili1 = math_asr_s32(vili1 * 16383 + 16384, 15); virr1 = math_asr_s32(virr1 * 16383 + 16384, 15); viri1 = math_asr_s32(viri1 * 16383 + 16384, 15); const int32_t vacc1r0 = vilr0 + virr0; const int32_t vacc1i0 = vili0 - viri0; const int32_t vacc2r0 = vilr0 - virr0; const int32_t vacc2i0 = vili0 + viri0; const int32_t vacc1r1 = vilr1 + virr1; const int32_t vacc1i1 = vili1 - viri1; const int32_t vacc2r1 = vilr1 - virr1; const int32_t vacc2i1 = vili1 + viri1; const int32_t vaccr0 = math_asr_s32(vacc2r0 * vtwr0 - vacc2i0 * vtwi0 + 16384, 15); const int32_t vacci0 = math_asr_s32(vacc2r0 * vtwi0 + vacc2i0 * vtwr0 + 16384, 15); const int32_t vaccr1 = math_asr_s32(vacc2r1 * vtwr1 - vacc2i1 * vtwi1 + 16384, 15); const int32_t vacci1 = math_asr_s32(vacc2r1 * vtwi1 + vacc2i1 * vtwr1 + 16384, 15); dl[0] = math_asr_s32(vacc1r0 + vaccr0, 1); dl[1] = math_asr_s32(vacc1i0 + vacci0, 1); dl[2] = math_asr_s32(vacc1r1 + vaccr1, 1); dl[3] = math_asr_s32(vacc1i1 + vacci1, 1); dr[2] = math_asr_s32(vacc1r0 - vaccr0, 1); dr[3] = math_asr_s32(vacci0 - vacc1i0, 1); dr[0] = math_asr_s32(vacc1r1 - vaccr1, 1); dr[1] = math_asr_s32(vacci1 - vacc1i1, 1); dl += 2 * 2; } if XNN_UNLIKELY(samples != 0) { do { dr -= 2; int32_t vilr = (int32_t) dl[0]; int32_t vili = (int32_t) dl[1]; int32_t virr = (int32_t) dr[0]; int32_t viri = (int32_t) dr[1]; const int32_t vtwr = twiddle[0]; const int32_t vtwi = twiddle[1]; twiddle += 2; vilr = math_asr_s32(vilr * 16383 + 16384, 15); vili = math_asr_s32(vili * 16383 + 16384, 15); virr = math_asr_s32(virr * 16383 + 16384, 15); viri = math_asr_s32(viri * 16383 + 16384, 15); const int32_t vacc1r = vilr + virr; const int32_t vacc1i = vili - viri; const int32_t vacc2r = vilr - virr; const int32_t vacc2i = vili + viri; const int32_t vaccr = math_asr_s32(vacc2r * vtwr - vacc2i * vtwi + 16384, 15); const int32_t vacci = math_asr_s32(vacc2r * vtwi + vacc2i * vtwr + 16384, 15); dl[0] = math_asr_s32(vacc1r + vaccr, 1); dl[1] = math_asr_s32(vacc1i + 
vacci, 1); dr[0] = math_asr_s32(vacc1r - vaccr, 1); dr[1] = math_asr_s32(vacci - vacc1i, 1); dl += 2; } while (--samples != 0); } }
4,075
31.870968
87
c
XNNPACK
XNNPACK-master/src/cs16-fftr/gen/cs16-fftr-scalar-x4.c
// Auto-generated file. Do not edit! // Template: src/cs16-fftr/scalar.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/fft.h> void xnn_cs16_fftr_ukernel__scalar_x4( size_t samples, int16_t* data, const int16_t* twiddle) { assert(samples != 0); assert(samples % 2 == 0); assert(data != NULL); assert(twiddle != NULL); int16_t* dl = data; int16_t* dr = data + samples * 2; int32_t vdcr = (int32_t) dl[0]; int32_t vdci = (int32_t) dl[1]; vdcr = math_asr_s32(vdcr * 16383 + 16384, 15); vdci = math_asr_s32(vdci * 16383 + 16384, 15); dl[0] = vdcr + vdci; dl[1] = 0; dl += 2; dr[0] = vdcr - vdci; dr[1] = 0; samples >>= 1; for (; samples >= 4; samples -= 4) { dr -= 4 * 2; int32_t vilr0 = (int32_t) dl[0]; int32_t vili0 = (int32_t) dl[1]; int32_t vilr1 = (int32_t) dl[2]; int32_t vili1 = (int32_t) dl[3]; int32_t vilr2 = (int32_t) dl[4]; int32_t vili2 = (int32_t) dl[5]; int32_t vilr3 = (int32_t) dl[6]; int32_t vili3 = (int32_t) dl[7]; int32_t virr0 = (int32_t) dr[6]; int32_t viri0 = (int32_t) dr[7]; int32_t virr1 = (int32_t) dr[4]; int32_t viri1 = (int32_t) dr[5]; int32_t virr2 = (int32_t) dr[2]; int32_t viri2 = (int32_t) dr[3]; int32_t virr3 = (int32_t) dr[0]; int32_t viri3 = (int32_t) dr[1]; const int32_t vtwr0 = twiddle[0]; const int32_t vtwi0 = twiddle[1]; const int32_t vtwr1 = twiddle[2]; const int32_t vtwi1 = twiddle[3]; const int32_t vtwr2 = twiddle[4]; const int32_t vtwi2 = twiddle[5]; const int32_t vtwr3 = twiddle[6]; const int32_t vtwi3 = twiddle[7]; twiddle += 4 * 2; vilr0 = math_asr_s32(vilr0 * 16383 + 16384, 15); vili0 = math_asr_s32(vili0 * 16383 + 16384, 15); virr0 = math_asr_s32(virr0 * 16383 + 16384, 15); viri0 = math_asr_s32(viri0 * 16383 + 16384, 15); vilr1 = math_asr_s32(vilr1 * 16383 + 16384, 15); vili1 = math_asr_s32(vili1 * 16383 + 16384, 15); virr1 = math_asr_s32(virr1 * 16383 + 16384, 15); viri1 = math_asr_s32(viri1 * 16383 + 16384, 15); vilr2 = math_asr_s32(vilr2 * 16383 + 16384, 15); vili2 = math_asr_s32(vili2 * 16383 + 16384, 15); virr2 = math_asr_s32(virr2 * 16383 + 16384, 15); viri2 = math_asr_s32(viri2 * 16383 + 16384, 15); vilr3 = math_asr_s32(vilr3 * 16383 + 16384, 15); vili3 = math_asr_s32(vili3 * 16383 + 16384, 15); virr3 = math_asr_s32(virr3 * 16383 + 16384, 15); viri3 = math_asr_s32(viri3 * 16383 + 16384, 15); const int32_t vacc1r0 = vilr0 + virr0; const int32_t vacc1i0 = vili0 - viri0; const int32_t vacc2r0 = vilr0 - virr0; const int32_t vacc2i0 = vili0 + viri0; const int32_t vacc1r1 = vilr1 + virr1; const int32_t vacc1i1 = vili1 - viri1; const int32_t vacc2r1 = vilr1 - virr1; const int32_t vacc2i1 = vili1 + viri1; const int32_t vacc1r2 = vilr2 + virr2; const int32_t vacc1i2 = vili2 - viri2; const int32_t vacc2r2 = vilr2 - virr2; const int32_t vacc2i2 = vili2 + viri2; const int32_t vacc1r3 = vilr3 + virr3; const int32_t vacc1i3 = vili3 - viri3; const int32_t vacc2r3 = vilr3 - virr3; const int32_t vacc2i3 = vili3 + viri3; const int32_t vaccr0 = math_asr_s32(vacc2r0 * vtwr0 - vacc2i0 * vtwi0 + 16384, 15); const int32_t vacci0 = math_asr_s32(vacc2r0 * vtwi0 + vacc2i0 * vtwr0 + 16384, 15); const int32_t vaccr1 = math_asr_s32(vacc2r1 * vtwr1 - vacc2i1 * vtwi1 + 16384, 15); const int32_t vacci1 = math_asr_s32(vacc2r1 * vtwi1 + vacc2i1 * vtwr1 + 16384, 15); const int32_t vaccr2 = math_asr_s32(vacc2r2 * vtwr2 - vacc2i2 * vtwi2 + 
16384, 15); const int32_t vacci2 = math_asr_s32(vacc2r2 * vtwi2 + vacc2i2 * vtwr2 + 16384, 15); const int32_t vaccr3 = math_asr_s32(vacc2r3 * vtwr3 - vacc2i3 * vtwi3 + 16384, 15); const int32_t vacci3 = math_asr_s32(vacc2r3 * vtwi3 + vacc2i3 * vtwr3 + 16384, 15); dl[0] = math_asr_s32(vacc1r0 + vaccr0, 1); dl[1] = math_asr_s32(vacc1i0 + vacci0, 1); dl[2] = math_asr_s32(vacc1r1 + vaccr1, 1); dl[3] = math_asr_s32(vacc1i1 + vacci1, 1); dl[4] = math_asr_s32(vacc1r2 + vaccr2, 1); dl[5] = math_asr_s32(vacc1i2 + vacci2, 1); dl[6] = math_asr_s32(vacc1r3 + vaccr3, 1); dl[7] = math_asr_s32(vacc1i3 + vacci3, 1); dr[6] = math_asr_s32(vacc1r0 - vaccr0, 1); dr[7] = math_asr_s32(vacci0 - vacc1i0, 1); dr[4] = math_asr_s32(vacc1r1 - vaccr1, 1); dr[5] = math_asr_s32(vacci1 - vacc1i1, 1); dr[2] = math_asr_s32(vacc1r2 - vaccr2, 1); dr[3] = math_asr_s32(vacci2 - vacc1i2, 1); dr[0] = math_asr_s32(vacc1r3 - vaccr3, 1); dr[1] = math_asr_s32(vacci3 - vacc1i3, 1); dl += 4 * 2; } if XNN_UNLIKELY(samples != 0) { do { dr -= 2; int32_t vilr = (int32_t) dl[0]; int32_t vili = (int32_t) dl[1]; int32_t virr = (int32_t) dr[0]; int32_t viri = (int32_t) dr[1]; const int32_t vtwr = twiddle[0]; const int32_t vtwi = twiddle[1]; twiddle += 2; vilr = math_asr_s32(vilr * 16383 + 16384, 15); vili = math_asr_s32(vili * 16383 + 16384, 15); virr = math_asr_s32(virr * 16383 + 16384, 15); viri = math_asr_s32(viri * 16383 + 16384, 15); const int32_t vacc1r = vilr + virr; const int32_t vacc1i = vili - viri; const int32_t vacc2r = vilr - virr; const int32_t vacc2i = vili + viri; const int32_t vaccr = math_asr_s32(vacc2r * vtwr - vacc2i * vtwi + 16384, 15); const int32_t vacci = math_asr_s32(vacc2r * vtwi + vacc2i * vtwr + 16384, 15); dl[0] = math_asr_s32(vacc1r + vaccr, 1); dl[1] = math_asr_s32(vacc1i + vacci, 1); dr[0] = math_asr_s32(vacc1r - vaccr, 1); dr[1] = math_asr_s32(vacci - vacc1i, 1); dl += 2; } while (--samples != 0); } }
6,019
35.707317
87
c
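The x1/x2/x4 variants retire one, two, or four butterflies per iteration but share one contract. A hedged driver sketch; the buffer sizes reflect our reading of the pointer arithmetic above (dr starts at complex index samples, so the array holds samples + 1 complex pairs) and are not documented in these files:

#include <stddef.h>
#include <stdint.h>
#include <xnnpack/fft.h>

// data: samples + 1 complex int16 pairs, updated in place (the pair at index
// `samples` is the one written before the main loop). twiddle: samples / 2
// complex Q15 factors. Any unrolling variant works if its divisibility
// assert on `samples` holds (% 2 here, % 8 for the NEON x4 kernel).
static void fftr_inplace(size_t samples, int16_t* data, const int16_t* twiddle) {
  xnn_cs16_fftr_ukernel__scalar_x4(samples, data, twiddle);
}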
XNNPACK
XNNPACK-master/src/cs16-vsquareabs/gen/cs16-vsquareabs-hexagon-x10.c
// Auto-generated file. Do not edit! // Template: src/cs16-vsquareabs/hexagon.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <hexagon_protos.h> #include <hexagon_types.h> #include <xnnpack/vsquareabs.h> void xnn_cs16_vsquareabs_ukernel__hexagon_x10( size_t batch, const int16_t* input, uint32_t* output) XNN_OOB_READS { assert(batch != 0); assert(batch % (sizeof(int16_t) * 2) == 0); assert(input != NULL); assert(output != NULL); const HEXAGON_Vect64* i = (const HEXAGON_Vect64*) input; HEXAGON_Vect64* o = (HEXAGON_Vect64*) output; for (; batch >= 20 * sizeof(int16_t); batch -= 20 * sizeof(int16_t)) { HEXAGON_Vect64 vacc0 = *i++; HEXAGON_Vect64 vacc1 = *i++; HEXAGON_Vect64 vacc2 = *i++; HEXAGON_Vect64 vacc3 = *i++; HEXAGON_Vect64 vacc4 = *i++; vacc0 = Q6_P_vdmpy_PP_sat(vacc0, vacc0); vacc1 = Q6_P_vdmpy_PP_sat(vacc1, vacc1); vacc2 = Q6_P_vdmpy_PP_sat(vacc2, vacc2); vacc3 = Q6_P_vdmpy_PP_sat(vacc3, vacc3); vacc4 = Q6_P_vdmpy_PP_sat(vacc4, vacc4); *o++ = vacc0; *o++ = vacc1; *o++ = vacc2; *o++ = vacc3; *o++ = vacc4; } for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) { HEXAGON_Vect64 vacc = *i++; vacc = Q6_P_vdmpy_PP_sat(vacc, vacc); *o++ = vacc; } if XNN_LIKELY(batch != 0) { assert(batch == 2 * sizeof(int16_t)); const HEXAGON_Vect32 vi = *((const HEXAGON_Vect32*) i); HEXAGON_Vect32 vacc = Q6_R_mpy_RlRl(vi, vi); vacc = Q6_R_mpyacc_RhRh_sat(vacc, vi, vi); *((HEXAGON_Vect32*) o) = vacc; } }
1,783
26.446154
72
c
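The whole magnitude-squared collapses into one instruction here: Q6_P_vdmpy_PP_sat multiplies corresponding int16 halves and adds them pairwise with saturation per 32-bit word, so squaring a register against itself yields re*re + im*im for each packed complex sample. A scalar model under that reading (naming ours), consistent with the kernel's own scalar tail of Q6_R_mpy_RlRl plus Q6_R_mpyacc_RhRh_sat:

#include <stdint.h>

// One 64-bit Hexagon vector holds two complex int16 samples {re0, im0, re1, im1};
// each output word is re*re + im*im, saturated to the signed 32-bit range.
static void vdmpy_squareabs_ref(const int16_t v[4], uint32_t out[2]) {
  for (int k = 0; k < 2; k++) {
    const int64_t acc = (int64_t) v[2 * k] * v[2 * k]
                      + (int64_t) v[2 * k + 1] * v[2 * k + 1];
    // Saturation only matters when re == im == INT16_MIN, where acc == 2^31.
    out[k] = acc > INT32_MAX ? (uint32_t) INT32_MAX : (uint32_t) acc;
  }
}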
XNNPACK
XNNPACK-master/src/cs16-vsquareabs/gen/cs16-vsquareabs-hexagon-x12.c
// Auto-generated file. Do not edit! // Template: src/cs16-vsquareabs/hexagon.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <hexagon_protos.h> #include <hexagon_types.h> #include <xnnpack/vsquareabs.h> void xnn_cs16_vsquareabs_ukernel__hexagon_x12( size_t batch, const int16_t* input, uint32_t* output) XNN_OOB_READS { assert(batch != 0); assert(batch % (sizeof(int16_t) * 2) == 0); assert(input != NULL); assert(output != NULL); const HEXAGON_Vect64* i = (const HEXAGON_Vect64*) input; HEXAGON_Vect64* o = (HEXAGON_Vect64*) output; for (; batch >= 24 * sizeof(int16_t); batch -= 24 * sizeof(int16_t)) { HEXAGON_Vect64 vacc0 = *i++; HEXAGON_Vect64 vacc1 = *i++; HEXAGON_Vect64 vacc2 = *i++; HEXAGON_Vect64 vacc3 = *i++; HEXAGON_Vect64 vacc4 = *i++; HEXAGON_Vect64 vacc5 = *i++; vacc0 = Q6_P_vdmpy_PP_sat(vacc0, vacc0); vacc1 = Q6_P_vdmpy_PP_sat(vacc1, vacc1); vacc2 = Q6_P_vdmpy_PP_sat(vacc2, vacc2); vacc3 = Q6_P_vdmpy_PP_sat(vacc3, vacc3); vacc4 = Q6_P_vdmpy_PP_sat(vacc4, vacc4); vacc5 = Q6_P_vdmpy_PP_sat(vacc5, vacc5); *o++ = vacc0; *o++ = vacc1; *o++ = vacc2; *o++ = vacc3; *o++ = vacc4; *o++ = vacc5; } for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) { HEXAGON_Vect64 vacc = *i++; vacc = Q6_P_vdmpy_PP_sat(vacc, vacc); *o++ = vacc; } if XNN_LIKELY(batch != 0) { assert(batch == 2 * sizeof(int16_t)); const HEXAGON_Vect32 vi = *((const HEXAGON_Vect32*) i); HEXAGON_Vect32 vacc = Q6_R_mpy_RlRl(vi, vi); vacc = Q6_R_mpyacc_RhRh_sat(vacc, vi, vi); *((HEXAGON_Vect32*) o) = vacc; } }
1,879
26.647059
72
c
XNNPACK
XNNPACK-master/src/cs16-vsquareabs/gen/cs16-vsquareabs-hexagon-x2.c
// Auto-generated file. Do not edit! // Template: src/cs16-vsquareabs/hexagon.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <hexagon_protos.h> #include <hexagon_types.h> #include <xnnpack/vsquareabs.h> void xnn_cs16_vsquareabs_ukernel__hexagon_x2( size_t batch, const int16_t* input, uint32_t* output) XNN_OOB_READS { assert(batch != 0); assert(batch % (sizeof(int16_t) * 2) == 0); assert(input != NULL); assert(output != NULL); const HEXAGON_Vect64* i = (const HEXAGON_Vect64*) input; HEXAGON_Vect64* o = (HEXAGON_Vect64*) output; for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) { HEXAGON_Vect64 vacc = *i++; vacc = Q6_P_vdmpy_PP_sat(vacc, vacc); *o++ = vacc; } if XNN_LIKELY(batch != 0) { assert(batch == 2 * sizeof(int16_t)); const HEXAGON_Vect32 vi = *((const HEXAGON_Vect32*) i); HEXAGON_Vect32 vacc = Q6_R_mpy_RlRl(vi, vi); vacc = Q6_R_mpyacc_RhRh_sat(vacc, vi, vi); *((HEXAGON_Vect32*) o) = vacc; } }
1,223
25.608696
72
c
XNNPACK
XNNPACK-master/src/cs16-vsquareabs/gen/cs16-vsquareabs-hexagon-x4.c
// Auto-generated file. Do not edit! // Template: src/cs16-vsquareabs/hexagon.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <hexagon_protos.h> #include <hexagon_types.h> #include <xnnpack/vsquareabs.h> void xnn_cs16_vsquareabs_ukernel__hexagon_x4( size_t batch, const int16_t* input, uint32_t* output) XNN_OOB_READS { assert(batch != 0); assert(batch % (sizeof(int16_t) * 2) == 0); assert(input != NULL); assert(output != NULL); const HEXAGON_Vect64* i = (const HEXAGON_Vect64*) input; HEXAGON_Vect64* o = (HEXAGON_Vect64*) output; for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) { HEXAGON_Vect64 vacc0 = *i++; HEXAGON_Vect64 vacc1 = *i++; vacc0 = Q6_P_vdmpy_PP_sat(vacc0, vacc0); vacc1 = Q6_P_vdmpy_PP_sat(vacc1, vacc1); *o++ = vacc0; *o++ = vacc1; } for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) { HEXAGON_Vect64 vacc = *i++; vacc = Q6_P_vdmpy_PP_sat(vacc, vacc); *o++ = vacc; } if XNN_LIKELY(batch != 0) { assert(batch == 2 * sizeof(int16_t)); const HEXAGON_Vect32 vi = *((const HEXAGON_Vect32*) i); HEXAGON_Vect32 vacc = Q6_R_mpy_RlRl(vi, vi); vacc = Q6_R_mpyacc_RhRh_sat(vacc, vi, vi); *((HEXAGON_Vect32*) o) = vacc; } }
1,492
25.660714
72
c
XNNPACK
XNNPACK-master/src/cs16-vsquareabs/gen/cs16-vsquareabs-hexagon-x6.c
// Auto-generated file. Do not edit! // Template: src/cs16-vsquareabs/hexagon.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <hexagon_protos.h> #include <hexagon_types.h> #include <xnnpack/vsquareabs.h> void xnn_cs16_vsquareabs_ukernel__hexagon_x6( size_t batch, const int16_t* input, uint32_t* output) XNN_OOB_READS { assert(batch != 0); assert(batch % (sizeof(int16_t) * 2) == 0); assert(input != NULL); assert(output != NULL); const HEXAGON_Vect64* i = (const HEXAGON_Vect64*) input; HEXAGON_Vect64* o = (HEXAGON_Vect64*) output; for (; batch >= 12 * sizeof(int16_t); batch -= 12 * sizeof(int16_t)) { HEXAGON_Vect64 vacc0 = *i++; HEXAGON_Vect64 vacc1 = *i++; HEXAGON_Vect64 vacc2 = *i++; vacc0 = Q6_P_vdmpy_PP_sat(vacc0, vacc0); vacc1 = Q6_P_vdmpy_PP_sat(vacc1, vacc1); vacc2 = Q6_P_vdmpy_PP_sat(vacc2, vacc2); *o++ = vacc0; *o++ = vacc1; *o++ = vacc2; } for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) { HEXAGON_Vect64 vacc = *i++; vacc = Q6_P_vdmpy_PP_sat(vacc, vacc); *o++ = vacc; } if XNN_LIKELY(batch != 0) { assert(batch == 2 * sizeof(int16_t)); const HEXAGON_Vect32 vi = *((const HEXAGON_Vect32*) i); HEXAGON_Vect32 vacc = Q6_R_mpy_RlRl(vi, vi); vacc = Q6_R_mpyacc_RhRh_sat(vacc, vi, vi); *((HEXAGON_Vect32*) o) = vacc; } }
1,590
25.966102
72
c
XNNPACK
XNNPACK-master/src/cs16-vsquareabs/gen/cs16-vsquareabs-hexagon-x8.c
// Auto-generated file. Do not edit! // Template: src/cs16-vsquareabs/hexagon.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <hexagon_protos.h> #include <hexagon_types.h> #include <xnnpack/vsquareabs.h> void xnn_cs16_vsquareabs_ukernel__hexagon_x8( size_t batch, const int16_t* input, uint32_t* output) XNN_OOB_READS { assert(batch != 0); assert(batch % (sizeof(int16_t) * 2) == 0); assert(input != NULL); assert(output != NULL); const HEXAGON_Vect64* i = (const HEXAGON_Vect64*) input; HEXAGON_Vect64* o = (HEXAGON_Vect64*) output; for (; batch >= 16 * sizeof(int16_t); batch -= 16 * sizeof(int16_t)) { HEXAGON_Vect64 vacc0 = *i++; HEXAGON_Vect64 vacc1 = *i++; HEXAGON_Vect64 vacc2 = *i++; HEXAGON_Vect64 vacc3 = *i++; vacc0 = Q6_P_vdmpy_PP_sat(vacc0, vacc0); vacc1 = Q6_P_vdmpy_PP_sat(vacc1, vacc1); vacc2 = Q6_P_vdmpy_PP_sat(vacc2, vacc2); vacc3 = Q6_P_vdmpy_PP_sat(vacc3, vacc3); *o++ = vacc0; *o++ = vacc1; *o++ = vacc2; *o++ = vacc3; } for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) { HEXAGON_Vect64 vacc = *i++; vacc = Q6_P_vdmpy_PP_sat(vacc, vacc); *o++ = vacc; } if XNN_LIKELY(batch != 0) { assert(batch == 2 * sizeof(int16_t)); const HEXAGON_Vect32 vi = *((const HEXAGON_Vect32*) i); HEXAGON_Vect32 vacc = Q6_R_mpy_RlRl(vi, vi); vacc = Q6_R_mpyacc_RhRh_sat(vacc, vi, vi); *((HEXAGON_Vect32*) o) = vacc; } }
1,686
26.209677
72
c
XNNPACK
XNNPACK-master/src/cs16-vsquareabs/gen/cs16-vsquareabs-neon-mlal-ld128-x12.c
// Auto-generated file. Do not edit! // Template: src/cs16-vsquareabs/neon.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <arm_neon.h> #include <xnnpack/vsquareabs.h> void xnn_cs16_vsquareabs_ukernel__neon_mlal_ld128_x12( size_t batch, const int16_t* input, uint32_t* output) XNN_OOB_READS { assert(batch != 0); assert(batch % (sizeof(int16_t) * 2) == 0); assert(input != NULL); assert(output != NULL); for (; batch >= 24 * sizeof(int16_t); batch -= 24 * sizeof(int16_t)) { const int16x4x2_t vi0 = vld2_s16(input); input += 8; const int16x4x2_t vi1 = vld2_s16(input); input += 8; const int16x4x2_t vi2 = vld2_s16(input); input += 8; int32x4_t vacc0 = vmull_s16(vi0.val[0], vi0.val[0]); int32x4_t vacc1 = vmull_s16(vi1.val[0], vi1.val[0]); int32x4_t vacc2 = vmull_s16(vi2.val[0], vi2.val[0]); vacc0 = vmlal_s16(vacc0, vi0.val[1], vi0.val[1]); vacc1 = vmlal_s16(vacc1, vi1.val[1], vi1.val[1]); vacc2 = vmlal_s16(vacc2, vi2.val[1], vi2.val[1]); vst1q_u32(output, vreinterpretq_u32_s32(vacc0)); output += 4; vst1q_u32(output, vreinterpretq_u32_s32(vacc1)); output += 4; vst1q_u32(output, vreinterpretq_u32_s32(vacc2)); output += 4; } for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) { const int16x4x2_t vi = vld2_s16(input); input += 8; int32x4_t vacc = vmull_s16(vi.val[0], vi.val[0]); vacc = vmlal_s16(vacc, vi.val[1], vi.val[1]); vst1q_u32(output, vreinterpretq_u32_s32(vacc)); output += 4; } if XNN_LIKELY(batch != 0) { const int16x4x2_t vi = vld2_s16(input); int32x4_t vacc = vmull_s16(vi.val[0], vi.val[0]); vacc = vmlal_s16(vacc, vi.val[1], vi.val[1]); uint32x2_t vacc_lo = vreinterpret_u32_s32(vget_low_s32(vacc)); if (batch & (4 * sizeof(int16_t))) { vst1_u32(output, vacc_lo); output += 2; vacc_lo = vreinterpret_u32_s32(vget_high_s32(vacc)); } if (batch & (2 * sizeof(int16_t))) { vst1_lane_u32(output, vacc_lo, 0); } } }
2,226
32.742424
72
c
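The NEON variants lean on vld2_s16 to deinterleave in the load itself: val[0] receives four real parts and val[1] four imaginary parts, so each vector of magnitudes costs exactly one vmull plus one vmlal. A minimal illustration (data and names ours):

#include <assert.h>
#include <stdint.h>
#include <arm_neon.h>

int main(void) {
  const int16_t interleaved[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };  // (re, im) pairs
  const int16x4x2_t v = vld2_s16(interleaved);
  assert(vget_lane_s16(v.val[0], 1) == 3);  // second real part
  assert(vget_lane_s16(v.val[1], 1) == 4);  // second imaginary part
  int32x4_t vacc = vmull_s16(v.val[0], v.val[0]);  // re * re
  vacc = vmlal_s16(vacc, v.val[1], v.val[1]);      // + im * im
  assert(vgetq_lane_s32(vacc, 0) == 1 * 1 + 2 * 2);
  return 0;
}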
XNNPACK
XNNPACK-master/src/cs16-vsquareabs/gen/cs16-vsquareabs-neon-mlal-ld128-x16.c
// Auto-generated file. Do not edit! // Template: src/cs16-vsquareabs/neon.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <arm_neon.h> #include <xnnpack/vsquareabs.h> void xnn_cs16_vsquareabs_ukernel__neon_mlal_ld128_x16( size_t batch, const int16_t* input, uint32_t* output) XNN_OOB_READS { assert(batch != 0); assert(batch % (sizeof(int16_t) * 2) == 0); assert(input != NULL); assert(output != NULL); for (; batch >= 32 * sizeof(int16_t); batch -= 32 * sizeof(int16_t)) { const int16x4x2_t vi0 = vld2_s16(input); input += 8; const int16x4x2_t vi1 = vld2_s16(input); input += 8; const int16x4x2_t vi2 = vld2_s16(input); input += 8; const int16x4x2_t vi3 = vld2_s16(input); input += 8; int32x4_t vacc0 = vmull_s16(vi0.val[0], vi0.val[0]); int32x4_t vacc1 = vmull_s16(vi1.val[0], vi1.val[0]); int32x4_t vacc2 = vmull_s16(vi2.val[0], vi2.val[0]); int32x4_t vacc3 = vmull_s16(vi3.val[0], vi3.val[0]); vacc0 = vmlal_s16(vacc0, vi0.val[1], vi0.val[1]); vacc1 = vmlal_s16(vacc1, vi1.val[1], vi1.val[1]); vacc2 = vmlal_s16(vacc2, vi2.val[1], vi2.val[1]); vacc3 = vmlal_s16(vacc3, vi3.val[1], vi3.val[1]); vst1q_u32(output, vreinterpretq_u32_s32(vacc0)); output += 4; vst1q_u32(output, vreinterpretq_u32_s32(vacc1)); output += 4; vst1q_u32(output, vreinterpretq_u32_s32(vacc2)); output += 4; vst1q_u32(output, vreinterpretq_u32_s32(vacc3)); output += 4; } for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) { const int16x4x2_t vi = vld2_s16(input); input += 8; int32x4_t vacc = vmull_s16(vi.val[0], vi.val[0]); vacc = vmlal_s16(vacc, vi.val[1], vi.val[1]); vst1q_u32(output, vreinterpretq_u32_s32(vacc)); output += 4; } if XNN_LIKELY(batch != 0) { const int16x4x2_t vi = vld2_s16(input); int32x4_t vacc = vmull_s16(vi.val[0], vi.val[0]); vacc = vmlal_s16(vacc, vi.val[1], vi.val[1]); uint32x2_t vacc_lo = vreinterpret_u32_s32(vget_low_s32(vacc)); if (batch & (4 * sizeof(int16_t))) { vst1_u32(output, vacc_lo); output += 2; vacc_lo = vreinterpret_u32_s32(vget_high_s32(vacc)); } if (batch & (2 * sizeof(int16_t))) { vst1_lane_u32(output, vacc_lo, 0); } } }
2,460
34.157143
72
c
XNNPACK
XNNPACK-master/src/cs16-vsquareabs/gen/cs16-vsquareabs-neon-mlal-ld128-x4.c
// Auto-generated file. Do not edit! // Template: src/cs16-vsquareabs/neon.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <arm_neon.h> #include <xnnpack/vsquareabs.h> void xnn_cs16_vsquareabs_ukernel__neon_mlal_ld128_x4( size_t batch, const int16_t* input, uint32_t* output) XNN_OOB_READS { assert(batch != 0); assert(batch % (sizeof(int16_t) * 2) == 0); assert(input != NULL); assert(output != NULL); for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) { const int16x4x2_t vi = vld2_s16(input); input += 8; int32x4_t vacc = vmull_s16(vi.val[0], vi.val[0]); vacc = vmlal_s16(vacc, vi.val[1], vi.val[1]); vst1q_u32(output, vreinterpretq_u32_s32(vacc)); output += 4; } if XNN_LIKELY(batch != 0) { const int16x4x2_t vi = vld2_s16(input); int32x4_t vacc = vmull_s16(vi.val[0], vi.val[0]); vacc = vmlal_s16(vacc, vi.val[1], vi.val[1]); uint32x2_t vacc_lo = vreinterpret_u32_s32(vget_low_s32(vacc)); if (batch & (4 * sizeof(int16_t))) { vst1_u32(output, vacc_lo); output += 2; vacc_lo = vreinterpret_u32_s32(vget_high_s32(vacc)); } if (batch & (2 * sizeof(int16_t))) { vst1_lane_u32(output, vacc_lo, 0); } } }
1,443
28.469388
72
c
XNNPACK
XNNPACK-master/src/cs16-vsquareabs/gen/cs16-vsquareabs-neon-mlal-ld128-x8.c
// Auto-generated file. Do not edit! // Template: src/cs16-vsquareabs/neon.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <arm_neon.h> #include <xnnpack/vsquareabs.h> void xnn_cs16_vsquareabs_ukernel__neon_mlal_ld128_x8( size_t batch, const int16_t* input, uint32_t* output) XNN_OOB_READS { assert(batch != 0); assert(batch % (sizeof(int16_t) * 2) == 0); assert(input != NULL); assert(output != NULL); for (; batch >= 16 * sizeof(int16_t); batch -= 16 * sizeof(int16_t)) { const int16x4x2_t vi0 = vld2_s16(input); input += 8; const int16x4x2_t vi1 = vld2_s16(input); input += 8; int32x4_t vacc0 = vmull_s16(vi0.val[0], vi0.val[0]); int32x4_t vacc1 = vmull_s16(vi1.val[0], vi1.val[0]); vacc0 = vmlal_s16(vacc0, vi0.val[1], vi0.val[1]); vacc1 = vmlal_s16(vacc1, vi1.val[1], vi1.val[1]); vst1q_u32(output, vreinterpretq_u32_s32(vacc0)); output += 4; vst1q_u32(output, vreinterpretq_u32_s32(vacc1)); output += 4; } for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) { const int16x4x2_t vi = vld2_s16(input); input += 8; int32x4_t vacc = vmull_s16(vi.val[0], vi.val[0]); vacc = vmlal_s16(vacc, vi.val[1], vi.val[1]); vst1q_u32(output, vreinterpretq_u32_s32(vacc)); output += 4; } if XNN_LIKELY(batch != 0) { const int16x4x2_t vi = vld2_s16(input); int32x4_t vacc = vmull_s16(vi.val[0], vi.val[0]); vacc = vmlal_s16(vacc, vi.val[1], vi.val[1]); uint32x2_t vacc_lo = vreinterpret_u32_s32(vget_low_s32(vacc)); if (batch & (4 * sizeof(int16_t))) { vst1_u32(output, vacc_lo); output += 2; vacc_lo = vreinterpret_u32_s32(vget_high_s32(vacc)); } if (batch & (2 * sizeof(int16_t))) { vst1_lane_u32(output, vacc_lo, 0); } } }
1,991
31.129032
72
c
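All four unrollings share the same contract: batch is a byte count of int16 elements (two per complex sample) and output receives one uint32 per sample. A minimal self-check sketch against the obvious formula, assuming a NEON build; the test values are ours:

#include <assert.h>
#include <stdint.h>
#include <xnnpack/vsquareabs.h>

int main(void) {
  int16_t input[16];
  uint32_t output[8];
  for (int i = 0; i < 16; i++) {
    input[i] = (int16_t) (i * 1000 - 8000);
  }
  // 8 complex samples = 16 int16 elements; batch counts their bytes.
  xnn_cs16_vsquareabs_ukernel__neon_mlal_ld128_x8(16 * sizeof(int16_t), input, output);
  for (int i = 0; i < 8; i++) {
    const int32_t re = input[2 * i];
    const int32_t im = input[2 * i + 1];
    assert(output[i] == (uint32_t) (re * re + im * im));
  }
  return 0;
}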
XNNPACK
XNNPACK-master/src/cs16-vsquareabs/gen/cs16-vsquareabs-scalar-x1.c
// Auto-generated file. Do not edit! // Template: src/cs16-vsquareabs/scalar.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/vsquareabs.h> void xnn_cs16_vsquareabs_ukernel__scalar_x1( size_t batch, const int16_t* input, uint32_t* output) { assert(batch != 0); assert(batch % (sizeof(int16_t) * 2) == 0); assert(input != NULL); assert(output != NULL); do { const int32_t vr = (int32_t) input[0]; const int32_t vi = (int32_t) input[1]; input += 2; uint32_t vacc = (uint32_t) (vr * vr); vacc += (uint32_t) (vi * vi); *output++ = vacc; batch -= sizeof(int16_t) * 2; } while (batch != 0); }
903
21.6
72
c
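Unlike the saturating Hexagon path, the scalar kernels can form vr * vr + vi * vi directly: each component is at most 32768 in magnitude, each square at most 2^30, and the sum at most 2^31, which the uint32 accumulator holds exactly. The bound, stated at compile time:

#include <stdint.h>

// Worst case is vr == vi == INT16_MIN: 2^30 + 2^30 == 2^31 <= UINT32_MAX,
// and each individual square (2^30) also fits the intermediate int32.
_Static_assert(
    (uint64_t) 32768 * 32768 + (uint64_t) 32768 * 32768 <= (uint64_t) UINT32_MAX,
    "int16 complex magnitude squared fits in uint32");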
XNNPACK
XNNPACK-master/src/cs16-vsquareabs/gen/cs16-vsquareabs-scalar-x2.c
// Auto-generated file. Do not edit! // Template: src/cs16-vsquareabs/scalar.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/vsquareabs.h> void xnn_cs16_vsquareabs_ukernel__scalar_x2( size_t batch, const int16_t* input, uint32_t* output) { assert(batch != 0); assert(batch % (sizeof(int16_t) * 2) == 0); assert(input != NULL); assert(output != NULL); for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) { const int32_t vr0 = (int32_t) input[0]; const int32_t vi0 = (int32_t) input[1]; const int32_t vr1 = (int32_t) input[2]; const int32_t vi1 = (int32_t) input[3]; input += 2 * 2; uint32_t vacc0 = (uint32_t) (vr0 * vr0); uint32_t vacc1 = (uint32_t) (vr1 * vr1); vacc0 += (uint32_t) (vi0 * vi0); vacc1 += (uint32_t) (vi1 * vi1); output[0] = vacc0; output[1] = vacc1; output += 2; } if XNN_LIKELY(batch != 0) { assert(batch == 2 * sizeof(int16_t)); const int32_t vr = (int32_t) input[0]; const int32_t vi = (int32_t) input[1]; uint32_t vacc = (uint32_t) (vr * vr); vacc += (uint32_t) (vi * vi); *output = vacc; } }
1,398
23.54386
72
c
XNNPACK
XNNPACK-master/src/cs16-vsquareabs/gen/cs16-vsquareabs-scalar-x3.c
// Auto-generated file. Do not edit! // Template: src/cs16-vsquareabs/scalar.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/vsquareabs.h> void xnn_cs16_vsquareabs_ukernel__scalar_x3( size_t batch, const int16_t* input, uint32_t* output) { assert(batch != 0); assert(batch % (sizeof(int16_t) * 2) == 0); assert(input != NULL); assert(output != NULL); for (; batch >= 6 * sizeof(int16_t); batch -= 6 * sizeof(int16_t)) { const int32_t vr0 = (int32_t) input[0]; const int32_t vi0 = (int32_t) input[1]; const int32_t vr1 = (int32_t) input[2]; const int32_t vi1 = (int32_t) input[3]; const int32_t vr2 = (int32_t) input[4]; const int32_t vi2 = (int32_t) input[5]; input += 3 * 2; uint32_t vacc0 = (uint32_t) (vr0 * vr0); uint32_t vacc1 = (uint32_t) (vr1 * vr1); uint32_t vacc2 = (uint32_t) (vr2 * vr2); vacc0 += (uint32_t) (vi0 * vi0); vacc1 += (uint32_t) (vi1 * vi1); vacc2 += (uint32_t) (vi2 * vi2); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output += 3; } if XNN_LIKELY(batch != 0) { do { const int32_t vr = (int32_t) input[0]; const int32_t vi = (int32_t) input[1]; input += 2; uint32_t vacc = (uint32_t) (vr * vr); vacc += (uint32_t) (vi * vi); *output++ = vacc; batch -= sizeof(int16_t) * 2; } while (batch != 0); } }
1,649
24.78125
72
c
XNNPACK
XNNPACK-master/src/cs16-vsquareabs/gen/cs16-vsquareabs-scalar-x4.c
// Auto-generated file. Do not edit! // Template: src/cs16-vsquareabs/scalar.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/math.h> #include <xnnpack/vsquareabs.h> void xnn_cs16_vsquareabs_ukernel__scalar_x4( size_t batch, const int16_t* input, uint32_t* output) { assert(batch != 0); assert(batch % (sizeof(int16_t) * 2) == 0); assert(input != NULL); assert(output != NULL); for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) { const int32_t vr0 = (int32_t) input[0]; const int32_t vi0 = (int32_t) input[1]; const int32_t vr1 = (int32_t) input[2]; const int32_t vi1 = (int32_t) input[3]; const int32_t vr2 = (int32_t) input[4]; const int32_t vi2 = (int32_t) input[5]; const int32_t vr3 = (int32_t) input[6]; const int32_t vi3 = (int32_t) input[7]; input += 4 * 2; uint32_t vacc0 = (uint32_t) (vr0 * vr0); uint32_t vacc1 = (uint32_t) (vr1 * vr1); uint32_t vacc2 = (uint32_t) (vr2 * vr2); uint32_t vacc3 = (uint32_t) (vr3 * vr3); vacc0 += (uint32_t) (vi0 * vi0); vacc1 += (uint32_t) (vi1 * vi1); vacc2 += (uint32_t) (vi2 * vi2); vacc3 += (uint32_t) (vi3 * vi3); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output += 4; } if XNN_LIKELY(batch != 0) { do { const int32_t vr = (int32_t) input[0]; const int32_t vi = (int32_t) input[1]; input += 2; uint32_t vacc = (uint32_t) (vr * vr); vacc += (uint32_t) (vi * vi); *output++ = vacc; batch -= sizeof(int16_t) * 2; } while (batch != 0); } }
1,842
25.710145
72
c
XNNPACK
XNNPACK-master/src/enums/datatype-strings.c
// Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <math.h> #include <stddef.h> #include <stdint.h> #include <xnnpack.h> #include <xnnpack/common.h> #include <xnnpack/log.h> // This function is defined inline when logging is disabled #if XNN_LOG_LEVEL > 0 const char* xnn_datatype_to_string(enum xnn_datatype type) { switch (type) { case xnn_datatype_invalid: return "Invalid"; case xnn_datatype_fp32: return "FP32"; case xnn_datatype_fp16: return "FP16"; case xnn_datatype_qint8: return "QINT8"; case xnn_datatype_quint8: return "QUINT8"; case xnn_datatype_qint32: return "QINT32"; case xnn_datatype_qcint8: return "QCINT8"; case xnn_datatype_qcint32: return "QCINT32"; } XNN_UNREACHABLE; return NULL; } #endif // XNN_LOG_LEVEL > 0
948
22.725
72
c
XNNPACK
XNNPACK-master/src/enums/microkernel-type.c
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. // // Auto-generated file. Do not edit! // Specification: src/enums/microkernel-type.yaml // Generator: tools/generate-enum.py #include <assert.h> #include <stdint.h> #include <xnnpack/microkernel-type.h> static const uint8_t offset[13] = { 0, 8, 24, 39, 46, 51, 74, 80, 85, 111, 116, 126, 136 }; static const char data[] = "Default\0" "Average Pooling\0" "Conv2D HWC2CHW\0" "DWConv\0" "GEMM\0" "Global Average Pooling\0" "IGEMM\0" "Mean\0" "Pixelwise Average Pooling\0" "SPMM\0" "Subconv2D\0" "Transpose\0" "VMulCAddC"; const char* xnn_microkernel_type_to_string(enum xnn_microkernel_type microkernel_type) { assert(microkernel_type >= xnn_microkernel_type_default); assert(microkernel_type <= xnn_microkernel_type_vmulcaddc); return &data[offset[microkernel_type]]; }
979
22.902439
88
c
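Unlike the switch-based xnn_datatype_to_string above, this generated lookup packs every name into a single char array and indexes it with a per-enum byte offset; each offset equals the previous offset plus the previous string's length including its terminating NUL, and adjacent string literals concatenate into one array. A toy reproduction of the pattern (enum and names ours):

#include <assert.h>
#include <stdint.h>
#include <string.h>

enum toy_type { toy_type_alpha = 0, toy_type_beta, toy_type_gamma };
// "Alpha\0" occupies bytes 0..5 and "Beta\0" bytes 6..10, so "Gamma" starts at 11.
static const uint8_t toy_offset[3] = { 0, 6, 11 };
static const char toy_data[] = "Alpha\0" "Beta\0" "Gamma";

static const char* toy_type_to_string(enum toy_type t) {
  assert(t >= toy_type_alpha && t <= toy_type_gamma);
  return &toy_data[toy_offset[t]];
}

int main(void) {
  assert(strcmp(toy_type_to_string(toy_type_beta), "Beta") == 0);
  return 0;
}

Packing the table this way needs no per-string pointer relocations, which is presumably why the generator prefers it over an array of char pointers.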
XNNPACK
XNNPACK-master/src/enums/node-type.c
// Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <math.h> #include <stddef.h> #include <stdint.h> #include <xnnpack/common.h> #include <xnnpack/log.h> #include <xnnpack/node-type.h> // This function is defined inline when logging is disabled #if XNN_LOG_LEVEL > 0 const char* xnn_node_type_to_string(enum xnn_node_type type) { switch (type) { case xnn_node_type_invalid: return "Invalid"; case xnn_node_type_abs: return "Abs"; case xnn_node_type_add2: return "Add2"; case xnn_node_type_argmax_pooling_2d: return "ArgMax Pooling 2D"; case xnn_node_type_average_pooling_2d: return "Average Pooling 2D"; case xnn_node_type_bankers_rounding: return "Bankers Rounding"; case xnn_node_type_batch_matrix_multiply: return "Batch Matrix Multiply"; case xnn_node_type_ceiling: return "Ceiling"; case xnn_node_type_clamp: return "Clamp"; case xnn_node_type_concatenate2: return "Concatenate2"; case xnn_node_type_concatenate3: return "Concatenate3"; case xnn_node_type_concatenate4: return "Concatenate4"; case xnn_node_type_convert: return "Convert"; case xnn_node_type_convolution_2d: return "Convolution 2D"; case xnn_node_type_copy: return "Copy"; case xnn_node_type_deconvolution_2d: return "Deconvolution 2D"; case xnn_node_type_depth_to_space: return "Depth To Space"; case xnn_node_type_depthwise_convolution_2d: return "Depthwise Convolution 2D"; case xnn_node_type_divide: return "Divide"; case xnn_node_type_elu: return "ELU"; case xnn_node_type_even_split2: return "Even Split2"; case xnn_node_type_even_split3: return "Even Split3"; case xnn_node_type_even_split4: return "Even Split4"; case xnn_node_type_floor: return "Floor"; case xnn_node_type_fully_connected: return "Fully Connected"; case xnn_node_type_fully_connected_sparse: return "Fully Connected Sparse"; case xnn_node_type_global_average_pooling_1d: return "Global Average Pooling 1D"; case xnn_node_type_global_average_pooling_2d: return "Global Average Pooling 2D"; case xnn_node_type_global_sum_pooling_1d: return "Global Sum Pooling 1D"; case xnn_node_type_global_sum_pooling_2d: return "Global Sum Pooling 2D"; case xnn_node_type_hardswish: return "HardSwish"; case xnn_node_type_leaky_relu: return "Leaky ReLU"; case xnn_node_type_max_pooling_2d: return "Max Pooling 2D"; case xnn_node_type_maximum2: return "Maximum2"; case xnn_node_type_minimum2: return "Minimum2"; case xnn_node_type_multiply2: return "Multiply2"; case xnn_node_type_negate: return "Negate"; case xnn_node_type_prelu: return "PReLU"; case xnn_node_type_rope: return "RoPE"; case xnn_node_type_sigmoid: return "Sigmoid"; case xnn_node_type_softmax: return "Softmax"; case xnn_node_type_space_to_depth_2d: return "Space To Depth 2D"; case xnn_node_type_square: return "Square"; case xnn_node_type_square_root: return "Square Root"; case xnn_node_type_squared_difference: return "Squared Difference"; case xnn_node_type_static_constant_pad: return "Static Constant Pad"; case xnn_node_type_static_mean: return "Static Mean"; case xnn_node_type_static_reshape: return "Static Reshape"; case xnn_node_type_static_resize_bilinear_2d: return "Static Resize Bilinear 2D"; case xnn_node_type_static_slice: return "Static Slice"; case xnn_node_type_static_transpose: return "Static Transpose"; case xnn_node_type_subtract: return "Subtract"; case xnn_node_type_tanh: return "Tanh"; case xnn_node_type_unpooling_2d: return "Unpooling 2D"; } XNN_UNREACHABLE; return NULL; } #endif // XNN_LOG_LEVEL > 0
4,108
30.128788
72
c
XNNPACK
XNNPACK-master/src/enums/operator-type.c
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. // // Auto-generated file. Do not edit! // Specification: src/enums/operator-type.yaml // Generator: tools/generate-enum.py #include <assert.h> #include <stdint.h> #include <xnnpack/operator-type.h> static const uint16_t offset[140] = { 0, 8, 22, 36, 50, 64, 78, 92, 119, 147, 175, 203, 230, 257, 289, 307, 325, 350, 376, 392, 408, 423, 438, 460, 483, 506, 529, 552, 575, 598, 621, 639, 662, 686, 704, 727, 751, 775, 799, 823, 847, 871, 895, 909, 924, 939, 965, 991, 1017, 1043, 1075, 1107, 1133, 1160, 1187, 1204, 1221, 1255, 1289, 1303, 1317, 1331, 1347, 1363, 1389, 1415, 1447, 1473, 1499, 1533, 1567, 1601, 1635, 1669, 1703, 1733, 1763, 1783, 1803, 1824, 1845, 1866, 1887, 1911, 1935, 1958, 1981, 1999, 2017, 2032, 2047, 2065, 2083, 2102, 2121, 2140, 2159, 2176, 2193, 2209, 2225, 2253, 2281, 2309, 2337, 2364, 2391, 2408, 2426, 2444, 2462, 2480, 2495, 2511, 2527, 2545, 2563, 2581, 2607, 2634, 2661, 2678, 2695, 2717, 2739, 2768, 2797, 2816, 2835, 2854, 2873, 2888, 2903, 2918, 2933, 2952, 2972, 2992, 3013, 3034 }; static const char data[] = "Invalid\0" "Abs (NC, F16)\0" "Abs (NC, F32)\0" "Add (ND, F16)\0" "Add (ND, F32)\0" "Add (ND, QS8)\0" "Add (ND, QU8)\0" "ArgMax Pooling (NHWC, F32)\0" "Average Pooling (NHWC, F16)\0" "Average Pooling (NHWC, F32)\0" "Average Pooling (NHWC, QU8)\0" "Bankers Rounding (NC, F16)\0" "Bankers Rounding (NC, F32)\0" "Batch Matrix Multiply (NC, F32)\0" "Ceiling (NC, F16)\0" "Ceiling (NC, F32)\0" "Channel Shuffle (NC, X8)\0" "Channel Shuffle (NC, X32)\0" "Clamp (NC, F16)\0" "Clamp (NC, F32)\0" "Clamp (NC, S8)\0" "Clamp (NC, U8)\0" "Constant Pad (ND, X8)\0" "Constant Pad (ND, X16)\0" "Constant Pad (ND, X32)\0" "Convert (NC, F16, F32)\0" "Convert (NC, F32, F16)\0" "Convert (NC, F32, QD8)\0" "Convert (NC, F32, QS8)\0" "Convert (NC, F32, QU8)\0" "Convert (NC, QS8)\0" "Convert (NC, QS8, F32)\0" "Convert (NC, QS16, QS8)\0" "Convert (NC, QU8)\0" "Convert (NC, QU8, F32)\0" "Convolution (NCHW, F16)\0" "Convolution (NCHW, F32)\0" "Convolution (NHWC, F16)\0" "Convolution (NHWC, F32)\0" "Convolution (NHWC, QC8)\0" "Convolution (NHWC, QS8)\0" "Convolution (NHWC, QU8)\0" "Copy (NC, X8)\0" "Copy (NC, X16)\0" "Copy (NC, X32)\0" "Deconvolution (NHWC, F16)\0" "Deconvolution (NHWC, F32)\0" "Deconvolution (NHWC, QS8)\0" "Deconvolution (NHWC, QU8)\0" "Depth To Space (NCHW2NHWC, X16)\0" "Depth To Space (NCHW2NHWC, X32)\0" "Depth To Space (NHWC, X8)\0" "Depth To Space (NHWC, X16)\0" "Depth To Space (NHWC, X32)\0" "Divide (ND, F16)\0" "Divide (ND, F32)\0" "Dynamic Fully Connected (NC, F16)\0" "Dynamic Fully Connected (NC, F32)\0" "ELU (NC, F16)\0" "ELU (NC, F32)\0" "ELU (NC, QS8)\0" "Floor (NC, F16)\0" "Floor (NC, F32)\0" "Fully Connected (NC, F16)\0" "Fully Connected (NC, F32)\0" "Fully Connected (NC, F32, QC8W)\0" "Fully Connected (NC, QS8)\0" "Fully Connected (NC, QU8)\0" "Global Average Pooling (NCW, F16)\0" "Global Average Pooling (NCW, F32)\0" "Global Average Pooling (NWC, F16)\0" "Global Average Pooling (NWC, F32)\0" "Global Average Pooling (NWC, QS8)\0" "Global Average Pooling (NWC, QU8)\0" "Global Sum Pooling (NWC, F16)\0" "Global Sum Pooling (NWC, F32)\0" "HardSwish (NC, F16)\0" "HardSwish (NC, F32)\0" "Leaky ReLU (NC, F16)\0" "Leaky ReLU (NC, F32)\0" "Leaky ReLU (NC, QS8)\0" "Leaky ReLU (NC, QU8)\0" "Max Pooling (NHWC, F16)\0" "Max Pooling (NHWC, F32)\0" "Max Pooling (NHWC, S8)\0" "Max Pooling (NHWC, U8)\0" "Maximum (ND, 
F16)\0" "Maximum (ND, F32)\0" "Mean (ND, F16)\0" "Mean (ND, F32)\0" "Minimum (ND, F16)\0" "Minimum (ND, F32)\0" "Multiply (ND, F16)\0" "Multiply (ND, F32)\0" "Multiply (ND, QS8)\0" "Multiply (ND, QU8)\0" "Negate (NC, F16)\0" "Negate (NC, F32)\0" "PReLU (NC, F16)\0" "PReLU (NC, F32)\0" "Resize Bilinear (NCHW, F16)\0" "Resize Bilinear (NCHW, F32)\0" "Resize Bilinear (NHWC, F16)\0" "Resize Bilinear (NHWC, F32)\0" "Resize Bilinear (NHWC, S8)\0" "Resize Bilinear (NHWC, U8)\0" "RoPE (NTHC, F32)\0" "Sigmoid (NC, F16)\0" "Sigmoid (NC, F32)\0" "Sigmoid (NC, QS8)\0" "Sigmoid (NC, QU8)\0" "Slice (ND, X8)\0" "Slice (ND, X16)\0" "Slice (ND, X32)\0" "Softmax (NC, F16)\0" "Softmax (NC, F32)\0" "Softmax (NC, QU8)\0" "Space To Depth (NHWC, X8)\0" "Space To Depth (NHWC, X16)\0" "Space To Depth (NHWC, X32)\0" "Square (NC, F16)\0" "Square (NC, F32)\0" "Square Root (NC, F16)\0" "Square Root (NC, F32)\0" "Squared Difference (NC, F16)\0" "Squared Difference (NC, F32)\0" "Subtract (ND, F16)\0" "Subtract (ND, F32)\0" "Subtract (ND, QS8)\0" "Subtract (ND, QU8)\0" "Tanh (NC, F16)\0" "Tanh (NC, F32)\0" "Tanh (NC, QS8)\0" "Tanh (NC, QU8)\0" "Transpose (ND, X8)\0" "Transpose (ND, X16)\0" "Transpose (ND, X32)\0" "Truncation (NC, F16)\0" "Truncation (NC, F32)\0" "Unpooling (NHWC, X32)"; const char* xnn_operator_type_to_string(enum xnn_operator_type operator_type) { assert(operator_type >= xnn_operator_type_invalid); assert(operator_type <= xnn_operator_type_unpooling_nhwc_x32); return &data[offset[operator_type]]; }
5,372
29.87931
116
c
XNNPACK
XNNPACK-master/src/f16-avgpool/f16-avgpool-9p8x-minmax-f16c-c8.c
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/avgpool.h> #include <xnnpack/intrinsics-polyfill.h> void xnn_f16_avgpool_minmax_ukernel_9p8x__f16c_c8( size_t output_pixels, size_t kernel_elements, size_t channels, const void** input, size_t input_offset, const void* zero, void* buffer, void* output, size_t input_increment, size_t output_increment, const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(output_pixels != 0); assert(kernel_elements > 9); assert(channels != 0); const __m256 vscale = _mm256_load_ps(params->avx.scale); const __m256 vmin = _mm256_load_ps(params->avx.min); const __m256 vmax = _mm256_load_ps(params->avx.max); uint16_t* o = (uint16_t*) output; do { { const uint16_t* i0 = *input++; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = *input++; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = *input++; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = *input++; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = *input++; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = *input++; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = *input++; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = *input++; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } const uint16_t* i8 = *input++; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != zero) { i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset); } uint16_t* b = (uint16_t*) buffer; for (size_t c = 0; c < channels; c += 8) { const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8; const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8; const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8; const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8; const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8; const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8; const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8; const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7)); i7 += 8; const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i8)); i8 += 8; const __m256 vsum01 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi0, vi1), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum23 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi2, vi3), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum45 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi4, vi5), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum018 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01, vi8), 
_MM_FROUND_TO_NEAREST_INT)); const __m256 vsum2345 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum23, vsum45), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum01678 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum018, vsum67), _MM_FROUND_TO_NEAREST_INT)); const __m128i vsum = _mm256_cvtps_ph(_mm256_add_ps(vsum2345, vsum01678), _MM_FROUND_TO_NEAREST_INT); _mm_storeu_si128((__m128i*) b, vsum); b += 8; } } size_t k = kernel_elements; for (k -= 9; k > 8; k -= 8) { const uint16_t* i0 = (const uint16_t*) *input++; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = (const uint16_t*) *input++; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = (const uint16_t*) *input++; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = (const uint16_t*) *input++; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = (const uint16_t*) *input++; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = (const uint16_t*) *input++; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = (const uint16_t*) *input++; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = (const uint16_t*) *input++; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } uint16_t* b = (uint16_t*) buffer; for (size_t c = 0; c < channels; c += 8) { const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8; const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8; const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8; const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8; const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8; const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8; const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8; const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7)); i7 += 8; const __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b)); const __m256 vsum01 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi0, vi1), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum23 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi2, vi3), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum45 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi4, vi5), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum01a = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01, vacc), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum2345 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum23, vsum45), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum0167a = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01a, vsum67), _MM_FROUND_TO_NEAREST_INT)); const __m128i vsum = _mm256_cvtps_ph(_mm256_add_ps(vsum2345, vsum0167a), _MM_FROUND_TO_NEAREST_INT); _mm_storeu_si128((__m128i*) b, vsum); b += 8; } } assert(k >= 1); { const uint16_t* i0 = 
(const uint16_t*) input[0]; assert(i0 != NULL); const uint16_t* i1 = (const uint16_t*) input[1]; const uint16_t* i2 = (const uint16_t*) input[2]; const uint16_t* i3 = (const uint16_t*) input[3]; const uint16_t* i4 = (const uint16_t*) input[4]; const uint16_t* i5 = (const uint16_t*) input[5]; const uint16_t* i6 = (const uint16_t*) input[6]; const uint16_t* i7 = (const uint16_t*) input[7]; input = (const void**) ((uintptr_t) input + input_increment); if (k < 2) { i1 = (const uint16_t*) zero; } assert(i1 != NULL); if (k <= 2) { i2 = (const uint16_t*) zero; } assert(i2 != NULL); if (k < 4) { i3 = (const uint16_t*) zero; } assert(i3 != NULL); if (k <= 4) { i4 = (const uint16_t*) zero; } assert(i4 != NULL); if (k < 6) { i5 = (const uint16_t*) zero; } assert(i5 != NULL); if (k <= 6) { i6 = (const uint16_t*) zero; } assert(i6 != NULL); if (k < 8) { i7 = (const uint16_t*) zero; } assert(i7 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } size_t c = channels; uint16_t* b = (uint16_t*) buffer; while (c >= 8) { const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8; const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8; const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8; const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8; const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8; const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8; const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8; const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7)); i7 += 8; const __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b)); b += 8; const __m256 vsum01 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi0, vi1), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum23 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi2, vi3), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum45 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi4, vi5), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum01a = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01, vacc), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum2345 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum23, vsum45), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum0167a = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01a, vsum67), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum2345, vsum0167a), _MM_FROUND_TO_NEAREST_INT)); __m256 vout = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vsum, vscale), _MM_FROUND_TO_NEAREST_INT)); vout = _mm256_max_ps(vout, vmin); vout = _mm256_min_ps(vout, vmax); 
_mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout, _MM_FROUND_TO_NEAREST_INT)); o += 8; c -= 8; } if (c != 0) { const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7)); const __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) b)); const __m256 vsum01 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi0, vi1), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum23 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi2, vi3), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum45 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi4, vi5), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum01a = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01, vacc), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum2345 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum23, vsum45), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum0167a = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01a, vsum67), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum2345, vsum0167a), _MM_FROUND_TO_NEAREST_INT)); __m256 vout = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vsum, vscale), _MM_FROUND_TO_NEAREST_INT)); vout = _mm256_max_ps(vout, vmin); vout = _mm256_min_ps(vout, vmax); __m128i vh = _mm256_cvtps_ph(vout, _MM_FROUND_TO_NEAREST_INT); if (c & 4) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (c & 2) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (c & 1) { *o = (uint16_t) _mm_extract_epi16(vh, 0); o += 1; } } } o = (uint16_t*) ((uintptr_t) o + output_increment); } while (--output_pixels != 0); }
14990
42.452174
125
c
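The multipass control flow in the f16c kernel above is easier to trace against a plain reference. Below is a minimal scalar sketch of the same 9p8x schedule, using float in place of the kernel's round-to-fp16-after-every-add arithmetic; the helper name, the flat row-pointer array, and the omission of the zero-row redirection and input_offset bookkeeping are simplifications for illustration, not part of the XNNPACK API.

#include <assert.h>
#include <stddef.h>

/* Scalar sketch of the 9p8x multipass average-pooling schedule:
 * first pass sums 9 rows into a scratch buffer, middle passes fold
 * in 8 rows at a time, the final pass adds the last 1..8 rows,
 * then scales and clamps. */
static void avgpool_9p8x_ref(
    size_t kernel_elements,   /* > 9, as the kernel asserts */
    size_t channels,
    const float** input,      /* kernel_elements row pointers */
    float* buffer,            /* channels scratch elements */
    float* output,
    float scale, float min, float max)
{
  assert(kernel_elements > 9);
  /* First pass: 9 rows initialize the buffer. */
  for (size_t c = 0; c < channels; c++) {
    float sum = 0.0f;
    for (size_t j = 0; j < 9; j++) {
      sum += input[j][c];
    }
    buffer[c] = sum;
  }
  size_t k = kernel_elements - 9;
  const float** in = input + 9;
  /* Middle passes: accumulate 8 rows at a time into the buffer. */
  for (; k > 8; k -= 8) {
    for (size_t c = 0; c < channels; c++) {
      float sum = buffer[c];
      for (size_t j = 0; j < 8; j++) {
        sum += in[j][c];
      }
      buffer[c] = sum;
    }
    in += 8;
  }
  /* Final pass: the remaining 1..8 rows, then scale and clamp. */
  for (size_t c = 0; c < channels; c++) {
    float sum = buffer[c];
    for (size_t j = 0; j < k; j++) {
      sum += in[j][c];
    }
    float out = sum * scale;
    out = out < min ? min : out;
    out = out > max ? max : out;
    output[c] = out;
  }
}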
XNNPACK
XNNPACK-master/src/f16-avgpool/f16-avgpool-9p8x-minmax-neonfp16arith-c8.c
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/avgpool.h> void xnn_f16_avgpool_minmax_ukernel_9p8x__neonfp16arith_c8( size_t output_pixels, size_t kernel_elements, size_t channels, const void** input, size_t input_offset, const void* zero, void* buffer, void* output, size_t input_increment, size_t output_increment, const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(output_pixels != 0); assert(kernel_elements > 9); assert(channels != 0); const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.scale)); const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { { const uint16_t* i0 = *input++; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = *input++; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = *input++; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = *input++; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = *input++; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = *input++; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = *input++; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = *input++; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } const uint16_t* i8 = *input++; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != zero) { i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset); } uint16_t* b = (uint16_t*) buffer; for (size_t c = 0; c < channels; c += 8) { const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vsum01 = vaddq_f16(vi0, vi1); const float16x8_t vsum23 = vaddq_f16(vi2, vi3); const float16x8_t vsum45 = vaddq_f16(vi4, vi5); const float16x8_t vsum67 = vaddq_f16(vi6, vi7); const float16x8_t vsum018 = vaddq_f16(vsum01, vi8); const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45); const float16x8_t vsum01678 = vaddq_f16(vsum018, vsum67); const float16x8_t vsum = vaddq_f16(vsum2345, vsum01678); vst1q_u16(b, vreinterpretq_u16_f16(vsum)); b += 8; } } size_t k = kernel_elements; for (k -= 9; k > 8; k -= 8) { const uint16_t* i0 = (const uint16_t*) *input++; assert(i0 != NULL); if 
XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = (const uint16_t*) *input++; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = (const uint16_t*) *input++; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = (const uint16_t*) *input++; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = (const uint16_t*) *input++; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = (const uint16_t*) *input++; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = (const uint16_t*) *input++; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = (const uint16_t*) *input++; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } uint16_t* b = (uint16_t*) buffer; for (size_t c = 0; c < channels; c += 8) { const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vsum01 = vaddq_f16(vi0, vi1); const float16x8_t vsum23 = vaddq_f16(vi2, vi3); const float16x8_t vsum45 = vaddq_f16(vi4, vi5); const float16x8_t vsum67 = vaddq_f16(vi6, vi7); const float16x8_t vsum01a = vaddq_f16(vsum01, vacc); const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45); const float16x8_t vsum0167a = vaddq_f16(vsum01a, vsum67); const float16x8_t vsum = vaddq_f16(vsum2345, vsum0167a); vst1q_u16(b, vreinterpretq_u16_f16(vsum)); b += 8; } } assert(k >= 1); { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); const uint16_t* i1 = (const uint16_t*) input[1]; const uint16_t* i2 = (const uint16_t*) input[2]; const uint16_t* i3 = (const uint16_t*) input[3]; const uint16_t* i4 = (const uint16_t*) input[4]; const uint16_t* i5 = (const uint16_t*) input[5]; const uint16_t* i6 = (const uint16_t*) input[6]; const uint16_t* i7 = (const uint16_t*) input[7]; input = (const void**) ((uintptr_t) input + input_increment); if (k < 2) { i1 = (const uint16_t*) zero; } assert(i1 != NULL); if (k <= 2) { i2 = (const uint16_t*) zero; } assert(i2 != NULL); if (k < 4) { i3 = (const uint16_t*) zero; } assert(i3 != NULL); if (k <= 4) { i4 = (const uint16_t*) zero; } assert(i4 != NULL); if (k < 6) { i5 = (const uint16_t*) zero; } assert(i5 != NULL); if (k <= 6) { i6 = (const uint16_t*) zero; } assert(i6 != NULL); if (k < 8) { i7 = (const uint16_t*) zero; } assert(i7 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } if XNN_UNPREDICTABLE(i2 
!= zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } size_t c = channels; uint16_t* b = (uint16_t*) buffer; while (c >= 8) { const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8; const float16x8_t vsum01 = vaddq_f16(vi0, vi1); const float16x8_t vsum23 = vaddq_f16(vi2, vi3); const float16x8_t vsum45 = vaddq_f16(vi4, vi5); const float16x8_t vsum67 = vaddq_f16(vi6, vi7); const float16x8_t vsum01a = vaddq_f16(vsum01, vacc); const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45); const float16x8_t vsum0167a = vaddq_f16(vsum01a, vsum67); const float16x8_t vsum = vaddq_f16(vsum2345, vsum0167a); float16x8_t vout = vmulq_f16(vsum, vscale); vout = vmaxq_f16(vout, vmin); vout = vminq_f16(vout, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vout)); output = (uint16_t*) output + 8; c -= 8; } if (c != 0) { const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0)); const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1)); const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2)); const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3)); const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4)); const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5)); const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6)); const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7)); const float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vsum01 = vaddq_f16(vi0, vi1); const float16x8_t vsum23 = vaddq_f16(vi2, vi3); const float16x8_t vsum45 = vaddq_f16(vi4, vi5); const float16x8_t vsum67 = vaddq_f16(vi6, vi7); const float16x8_t vsum01a = vaddq_f16(vsum01, vacc); const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45); const float16x8_t vsum0167a = vaddq_f16(vsum01a, vsum67); const float16x8_t vsum = vaddq_f16(vsum2345, vsum0167a); float16x8_t vout = vmulq_f16(vsum, vscale); vout = vmaxq_f16(vout, vmin); vout = vminq_f16(vout, vmax); float16x4_t vout_lo = vget_low_f16(vout); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vout_lo)); output = (uint16_t*) output + 4; vout_lo = vget_high_f16(vout); } if (c & 2) { vst1_lane_u32(output, vreinterpret_u32_f16(vout_lo), 0); output = (uint16_t*) output + 2; vout_lo = vext_f16(vout_lo, vout_lo, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vout_lo), 0); output = (uint16_t*) output + 1; } } } output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
12441
39.006431
99
c
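Both avgpool variants above sum the 8 new rows plus the carried accumulator with a balanced tree (vsum01, vsum23, ..., vsum0167a) rather than serially. A scalar sketch of that tree, with illustrative names: pairing the additions caps the dependency chain at 4 adds deep instead of the 8 a left-to-right sum of 9 terms would need.

/* Balanced add tree over 8 row values plus the carried buffer value,
 * mirroring the vsum01 / vsum23 / vsum45 / vsum67 / vsum01a /
 * vsum2345 / vsum0167a grouping in the kernels above. */
static inline float sum8_plus_acc(const float v[8], float acc) {
  const float sum01 = v[0] + v[1];
  const float sum23 = v[2] + v[3];
  const float sum45 = v[4] + v[5];
  const float sum67 = v[6] + v[7];
  const float sum01a = sum01 + acc;     /* fold in the carried buffer */
  const float sum2345 = sum23 + sum45;
  const float sum0167a = sum01a + sum67;
  return sum2345 + sum0167a;            /* depth 4, not depth 8 */
}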
XNNPACK
XNNPACK-master/src/f16-avgpool/f16-avgpool-9x-minmax-f16c-c8.c
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/avgpool.h> #include <xnnpack/intrinsics-polyfill.h> void xnn_f16_avgpool_minmax_ukernel_9x__f16c_c8( size_t output_pixels, size_t kernel_elements, size_t channels, const void** input, size_t input_offset, const void* zero, void* output, size_t input_increment, size_t output_increment, const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(output_pixels != 0); assert(kernel_elements != 0); assert(kernel_elements <= 9); assert(channels != 0); const __m256 vscale = _mm256_load_ps(params->avx.scale); const __m256 vmin = _mm256_load_ps(params->avx.min); const __m256 vmax = _mm256_load_ps(params->avx.max); uint16_t* o = (uint16_t*) output; do { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); const uint16_t* i1 = (const uint16_t*) input[1]; const uint16_t* i2 = (const uint16_t*) input[2]; const uint16_t* i3 = (const uint16_t*) input[3]; const uint16_t* i4 = (const uint16_t*) input[4]; const uint16_t* i5 = (const uint16_t*) input[5]; const uint16_t* i6 = (const uint16_t*) input[6]; const uint16_t* i7 = (const uint16_t*) input[7]; const uint16_t* i8 = (const uint16_t*) input[8]; input = (const void**) ((uintptr_t) input + input_increment); if (kernel_elements < 2) { i1 = (const uint16_t*) zero; } assert(i1 != NULL); if (kernel_elements <= 2) { i2 = (const uint16_t*) zero; } assert(i2 != NULL); if (kernel_elements < 4) { i3 = (const uint16_t*) zero; } assert(i3 != NULL); if (kernel_elements <= 4) { i4 = (const uint16_t*) zero; } assert(i4 != NULL); if (kernel_elements < 6) { i5 = (const uint16_t*) zero; } assert(i5 != NULL); if (kernel_elements <= 6) { i6 = (const uint16_t*) zero; } assert(i6 != NULL); if (kernel_elements < 8) { i7 = (const uint16_t*) zero; } assert(i7 != NULL); if (kernel_elements <= 8) { i8 = (const uint16_t*) zero; } assert(i8 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } if XNN_UNPREDICTABLE(i8 != zero) { i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset); } size_t c = channels; while (c >= 8) { const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8; const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8; const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8; const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8; const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8; const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8; const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8; const __m256 vi7 = 
_mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7)); i7 += 8; const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i8)); i8 += 8; const __m256 vsum01 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi0, vi1), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum23 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi2, vi3), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum45 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi4, vi5), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum018 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01, vi8), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum2345 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum23, vsum45), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum01678 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum018, vsum67), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum2345, vsum01678), _MM_FROUND_TO_NEAREST_INT)); __m256 vout = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vsum, vscale), _MM_FROUND_TO_NEAREST_INT)); vout = _mm256_max_ps(vout, vmin); vout = _mm256_min_ps(vout, vmax); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout, _MM_FROUND_TO_NEAREST_INT)); o += 8; c -= 8; } if (c != 0) { const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7)); const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i8)); const __m256 vsum01 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi0, vi1), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum23 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi2, vi3), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum45 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi4, vi5), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum018 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum01, vi8), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum2345 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum23, vsum45), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum01678 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum018, vsum67), _MM_FROUND_TO_NEAREST_INT)); const __m256 vsum = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vsum2345, vsum01678), _MM_FROUND_TO_NEAREST_INT)); __m256 vout = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vsum, vscale), _MM_FROUND_TO_NEAREST_INT)); vout = _mm256_max_ps(vout, vmin); vout = _mm256_min_ps(vout, vmax); __m128i vh = _mm256_cvtps_ph(vout, _MM_FROUND_TO_NEAREST_INT); if (c & 4) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (c & 2) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (c & 1) { *o = (uint16_t) _mm_extract_epi16(vh, 0); o += 1; } } o = (uint16_t*) ((uintptr_t) o + output_increment); } while (--output_pixels != 0); }
7819
40.157895
123
c
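The unipass 9x kernel above handles kernel_elements < 9 without per-tap branches in the inner loop: pointers beyond the kernel size are redirected to a shared zero buffer up front, so the same 9-input sum always runs. A sketch of that redirection, with the same alternating <, <= tests as the source (the helper name and float element type are illustrative only):

#include <stddef.h>

/* Redirect unused taps to a zero buffer so the summation loop
 * can read all 9 pointers unconditionally. */
static void redirect_taps(const float* i[9], size_t kernel_elements,
                          const float* zero) {
  if (kernel_elements < 2)  { i[1] = zero; }
  if (kernel_elements <= 2) { i[2] = zero; }
  if (kernel_elements < 4)  { i[3] = zero; }
  if (kernel_elements <= 4) { i[4] = zero; }
  if (kernel_elements < 6)  { i[5] = zero; }
  if (kernel_elements <= 6) { i[6] = zero; }
  if (kernel_elements < 8)  { i[7] = zero; }
  if (kernel_elements <= 8) { i[8] = zero; }
}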
XNNPACK
XNNPACK-master/src/f16-avgpool/f16-avgpool-9x-minmax-neonfp16arith-c8.c
// Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/avgpool.h> void xnn_f16_avgpool_minmax_ukernel_9x__neonfp16arith_c8( size_t output_pixels, size_t kernel_elements, size_t channels, const void** input, size_t input_offset, const void* zero, void* output, size_t input_increment, size_t output_increment, const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(output_pixels != 0); assert(kernel_elements != 0); assert(kernel_elements <= 9); assert(channels != 0); const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.scale)); const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); const uint16_t* i1 = (const uint16_t*) input[1]; const uint16_t* i2 = (const uint16_t*) input[2]; const uint16_t* i3 = (const uint16_t*) input[3]; const uint16_t* i4 = (const uint16_t*) input[4]; const uint16_t* i5 = (const uint16_t*) input[5]; const uint16_t* i6 = (const uint16_t*) input[6]; const uint16_t* i7 = (const uint16_t*) input[7]; const uint16_t* i8 = (const uint16_t*) input[8]; input = (const void**) ((uintptr_t) input + input_increment); if (kernel_elements < 2) { i1 = (const uint16_t*) zero; } assert(i1 != NULL); if (kernel_elements <= 2) { i2 = (const uint16_t*) zero; } assert(i2 != NULL); if (kernel_elements < 4) { i3 = (const uint16_t*) zero; } assert(i3 != NULL); if (kernel_elements <= 4) { i4 = (const uint16_t*) zero; } assert(i4 != NULL); if (kernel_elements < 6) { i5 = (const uint16_t*) zero; } assert(i5 != NULL); if (kernel_elements <= 6) { i6 = (const uint16_t*) zero; } assert(i6 != NULL); if (kernel_elements < 8) { i7 = (const uint16_t*) zero; } assert(i7 != NULL); if (kernel_elements <= 8) { i8 = (const uint16_t*) zero; } assert(i8 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } if XNN_UNPREDICTABLE(i8 != zero) { i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset); } size_t c = channels; while (c >= 8) { const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const 
float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vsum01 = vaddq_f16(vi0, vi1); const float16x8_t vsum23 = vaddq_f16(vi2, vi3); const float16x8_t vsum45 = vaddq_f16(vi4, vi5); const float16x8_t vsum67 = vaddq_f16(vi6, vi7); const float16x8_t vsum018 = vaddq_f16(vsum01, vi8); const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45); const float16x8_t vsum01678 = vaddq_f16(vsum018, vsum67); const float16x8_t vsum = vaddq_f16(vsum2345, vsum01678); float16x8_t vout = vmulq_f16(vsum, vscale); vout = vmaxq_f16(vout, vmin); vout = vminq_f16(vout, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vout)); output = (uint16_t*) output + 8; c -= 8; } if (c != 0) { const float16x8_t vi0 = vreinterpretq_f16_u16(vld1q_u16(i0)); const float16x8_t vi1 = vreinterpretq_f16_u16(vld1q_u16(i1)); const float16x8_t vi2 = vreinterpretq_f16_u16(vld1q_u16(i2)); const float16x8_t vi3 = vreinterpretq_f16_u16(vld1q_u16(i3)); const float16x8_t vi4 = vreinterpretq_f16_u16(vld1q_u16(i4)); const float16x8_t vi5 = vreinterpretq_f16_u16(vld1q_u16(i5)); const float16x8_t vi6 = vreinterpretq_f16_u16(vld1q_u16(i6)); const float16x8_t vi7 = vreinterpretq_f16_u16(vld1q_u16(i7)); const float16x8_t vi8 = vreinterpretq_f16_u16(vld1q_u16(i8)); const float16x8_t vsum01 = vaddq_f16(vi0, vi1); const float16x8_t vsum23 = vaddq_f16(vi2, vi3); const float16x8_t vsum45 = vaddq_f16(vi4, vi5); const float16x8_t vsum67 = vaddq_f16(vi6, vi7); const float16x8_t vsum018 = vaddq_f16(vsum01, vi8); const float16x8_t vsum2345 = vaddq_f16(vsum23, vsum45); const float16x8_t vsum01678 = vaddq_f16(vsum018, vsum67); const float16x8_t vsum = vaddq_f16(vsum2345, vsum01678); float16x8_t vout = vmulq_f16(vsum, vscale); vout = vmaxq_f16(vout, vmin); vout = vminq_f16(vout, vmax); float16x4_t vout_lo = vget_low_f16(vout); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vout_lo)); output = (uint16_t*) output + 4; vout_lo = vget_high_f16(vout); } if (c & 2) { vst1_lane_u32(output, vreinterpret_u32_f16(vout_lo), 0); output = (uint16_t*) output + 2; vout_lo = vext_f16(vout_lo, vout_lo, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vout_lo), 0); output = (uint16_t*) output + 1; } } output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_pixels != 0); }
6582
36.617143
97
c
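The remainder-channel stores in these kernels follow one pattern: write 4 halfwords if bit 2 of c is set, then 2, then 1, shifting already-stored lanes out of the vector between steps. A scalar model of that tail store, with the SIMD register stood in by an array of 8 lanes (names are illustrative, not XNNPACK API):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Store the last c (1..7) fp16 channels from an 8-lane vector,
 * mirroring the (c & 4) / (c & 2) / (c & 1) cascade above. */
static void store_tail(uint16_t* o, const uint16_t lanes[8], size_t c) {
  size_t pos = 0;
  if (c & 4) {
    memcpy(o, &lanes[pos], 4 * sizeof(uint16_t)); o += 4; pos += 4;
  }
  if (c & 2) {
    memcpy(o, &lanes[pos], 2 * sizeof(uint16_t)); o += 2; pos += 2;
  }
  if (c & 1) {
    *o = lanes[pos];
  }
}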
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-25p8c-minmax-neonfp16arith-acc2.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/unipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> void xnn_f16_dwconv_minmax_ukernel_25p8c__neonfp16arith_acc2( size_t channels, size_t output_width, const void** input, const void* weights, void* output_ptr, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); uint16_t* output = (uint16_t*) output_ptr; const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = (const uint16_t*) input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = (const uint16_t*) input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = (const uint16_t*) input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != (const uint16_t*) zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = (const uint16_t*) input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != (const uint16_t*) zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = (const uint16_t*) input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != (const uint16_t*) zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = (const uint16_t*) input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != (const uint16_t*) zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = (const uint16_t*) input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != (const uint16_t*) zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } const uint16_t* i8 = (const uint16_t*) input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != (const uint16_t*) zero) { i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset); } const uint16_t* i9 = (const uint16_t*) input[9]; assert(i9 != NULL); if XNN_UNPREDICTABLE(i9 != (const uint16_t*) zero) { i9 = (const uint16_t*) ((uintptr_t) i9 + input_offset); } const uint16_t* i10 = (const uint16_t*) input[10]; assert(i10 != NULL); if XNN_UNPREDICTABLE(i10 != (const uint16_t*) zero) { i10 = (const uint16_t*) ((uintptr_t) i10 + input_offset); } const uint16_t* i11 = (const uint16_t*) input[11]; assert(i11 != NULL); if XNN_UNPREDICTABLE(i11 != (const uint16_t*) zero) { i11 = (const uint16_t*) ((uintptr_t) i11 + input_offset); } const uint16_t* i12 = (const uint16_t*) input[12]; assert(i12 != NULL); if XNN_UNPREDICTABLE(i12 != (const uint16_t*) zero) { i12 = (const uint16_t*) ((uintptr_t) i12 + input_offset); } const uint16_t* i13 = (const uint16_t*) input[13]; assert(i13 != NULL); if XNN_UNPREDICTABLE(i13 != (const uint16_t*) zero) { i13 = (const uint16_t*) ((uintptr_t) i13 + input_offset); } const uint16_t* i14 = (const uint16_t*) 
input[14]; assert(i14 != NULL); if XNN_UNPREDICTABLE(i14 != (const uint16_t*) zero) { i14 = (const uint16_t*) ((uintptr_t) i14 + input_offset); } const uint16_t* i15 = (const uint16_t*) input[15]; assert(i15 != NULL); if XNN_UNPREDICTABLE(i15 != (const uint16_t*) zero) { i15 = (const uint16_t*) ((uintptr_t) i15 + input_offset); } const uint16_t* i16 = (const uint16_t*) input[16]; assert(i16 != NULL); if XNN_UNPREDICTABLE(i16 != (const uint16_t*) zero) { i16 = (const uint16_t*) ((uintptr_t) i16 + input_offset); } const uint16_t* i17 = (const uint16_t*) input[17]; assert(i17 != NULL); if XNN_UNPREDICTABLE(i17 != (const uint16_t*) zero) { i17 = (const uint16_t*) ((uintptr_t) i17 + input_offset); } const uint16_t* i18 = (const uint16_t*) input[18]; assert(i18 != NULL); if XNN_UNPREDICTABLE(i18 != (const uint16_t*) zero) { i18 = (const uint16_t*) ((uintptr_t) i18 + input_offset); } const uint16_t* i19 = (const uint16_t*) input[19]; assert(i19 != NULL); if XNN_UNPREDICTABLE(i19 != (const uint16_t*) zero) { i19 = (const uint16_t*) ((uintptr_t) i19 + input_offset); } const uint16_t* i20 = (const uint16_t*) input[20]; assert(i20 != NULL); if XNN_UNPREDICTABLE(i20 != (const uint16_t*) zero) { i20 = (const uint16_t*) ((uintptr_t) i20 + input_offset); } const uint16_t* i21 = (const uint16_t*) input[21]; assert(i21 != NULL); if XNN_UNPREDICTABLE(i21 != (const uint16_t*) zero) { i21 = (const uint16_t*) ((uintptr_t) i21 + input_offset); } const uint16_t* i22 = (const uint16_t*) input[22]; assert(i22 != NULL); if XNN_UNPREDICTABLE(i22 != (const uint16_t*) zero) { i22 = (const uint16_t*) ((uintptr_t) i22 + input_offset); } const uint16_t* i23 = (const uint16_t*) input[23]; assert(i23 != NULL); if XNN_UNPREDICTABLE(i23 != (const uint16_t*) zero) { i23 = (const uint16_t*) ((uintptr_t) i23 + input_offset); } const uint16_t* i24 = (const uint16_t*) input[24]; assert(i24 != NULL); if XNN_UNPREDICTABLE(i24 != (const uint16_t*) zero) { i24 = (const uint16_t*) ((uintptr_t) i24 + input_offset); } input = (const void**) ((uintptr_t) input + input_stride); size_t c = channels; const uint16_t* w = (const uint16_t*) weights; for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = 
vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); const float16x8_t vi9x01234567 = vreinterpretq_f16_u16(vld1q_u16(i9)); i9 += 8; const float16x8_t vk9x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi9x01234567, vk9x01234567); const float16x8_t vi10x01234567 = vreinterpretq_f16_u16(vld1q_u16(i10)); i10 += 8; const float16x8_t vk10x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi10x01234567, vk10x01234567); const float16x8_t vi11x01234567 = vreinterpretq_f16_u16(vld1q_u16(i11)); i11 += 8; const float16x8_t vk11x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi11x01234567, vk11x01234567); const float16x8_t vi12x01234567 = vreinterpretq_f16_u16(vld1q_u16(i12)); i12 += 8; const float16x8_t vk12x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi12x01234567, vk12x01234567); const float16x8_t vi13x01234567 = vreinterpretq_f16_u16(vld1q_u16(i13)); i13 += 8; const float16x8_t vk13x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi13x01234567, vk13x01234567); const float16x8_t vi14x01234567 = vreinterpretq_f16_u16(vld1q_u16(i14)); i14 += 8; const float16x8_t vk14x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi14x01234567, vk14x01234567); const float16x8_t vi15x01234567 = vreinterpretq_f16_u16(vld1q_u16(i15)); i15 += 8; const float16x8_t vk15x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi15x01234567, vk15x01234567); const float16x8_t vi16x01234567 = vreinterpretq_f16_u16(vld1q_u16(i16)); i16 += 8; const float16x8_t vk16x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi16x01234567, vk16x01234567); const float16x8_t vi17x01234567 = vreinterpretq_f16_u16(vld1q_u16(i17)); i17 += 8; const float16x8_t vk17x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi17x01234567, vk17x01234567); const float16x8_t vi18x01234567 = vreinterpretq_f16_u16(vld1q_u16(i18)); i18 += 8; const float16x8_t vk18x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi18x01234567, vk18x01234567); const float16x8_t vi19x01234567 = vreinterpretq_f16_u16(vld1q_u16(i19)); i19 += 8; const float16x8_t vk19x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi19x01234567, vk19x01234567); const float16x8_t vi20x01234567 = vreinterpretq_f16_u16(vld1q_u16(i20)); i20 += 8; const float16x8_t vk20x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi20x01234567, 
vk20x01234567); const float16x8_t vi21x01234567 = vreinterpretq_f16_u16(vld1q_u16(i21)); i21 += 8; const float16x8_t vk21x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi21x01234567, vk21x01234567); const float16x8_t vi22x01234567 = vreinterpretq_f16_u16(vld1q_u16(i22)); i22 += 8; const float16x8_t vk22x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi22x01234567, vk22x01234567); const float16x8_t vi23x01234567 = vreinterpretq_f16_u16(vld1q_u16(i23)); i23 += 8; const float16x8_t vk23x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi23x01234567, vk23x01234567); const float16x8_t vi24x01234567 = vreinterpretq_f16_u16(vld1q_u16(i24)); i24 += 8; const float16x8_t vk24x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi24x01234567, vk24x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); const float16x8_t vi9x01234567 = vreinterpretq_f16_u16(vld1q_u16(i9)); const float16x8_t vk9x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi9x01234567, vk9x01234567); const 
float16x8_t vi10x01234567 = vreinterpretq_f16_u16(vld1q_u16(i10)); const float16x8_t vk10x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi10x01234567, vk10x01234567); const float16x8_t vi11x01234567 = vreinterpretq_f16_u16(vld1q_u16(i11)); const float16x8_t vk11x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi11x01234567, vk11x01234567); const float16x8_t vi12x01234567 = vreinterpretq_f16_u16(vld1q_u16(i12)); const float16x8_t vk12x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi12x01234567, vk12x01234567); const float16x8_t vi13x01234567 = vreinterpretq_f16_u16(vld1q_u16(i13)); const float16x8_t vk13x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi13x01234567, vk13x01234567); const float16x8_t vi14x01234567 = vreinterpretq_f16_u16(vld1q_u16(i14)); const float16x8_t vk14x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi14x01234567, vk14x01234567); const float16x8_t vi15x01234567 = vreinterpretq_f16_u16(vld1q_u16(i15)); const float16x8_t vk15x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi15x01234567, vk15x01234567); const float16x8_t vi16x01234567 = vreinterpretq_f16_u16(vld1q_u16(i16)); const float16x8_t vk16x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi16x01234567, vk16x01234567); const float16x8_t vi17x01234567 = vreinterpretq_f16_u16(vld1q_u16(i17)); const float16x8_t vk17x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi17x01234567, vk17x01234567); const float16x8_t vi18x01234567 = vreinterpretq_f16_u16(vld1q_u16(i18)); const float16x8_t vk18x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi18x01234567, vk18x01234567); const float16x8_t vi19x01234567 = vreinterpretq_f16_u16(vld1q_u16(i19)); const float16x8_t vk19x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi19x01234567, vk19x01234567); const float16x8_t vi20x01234567 = vreinterpretq_f16_u16(vld1q_u16(i20)); const float16x8_t vk20x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi20x01234567, vk20x01234567); const float16x8_t vi21x01234567 = vreinterpretq_f16_u16(vld1q_u16(i21)); const float16x8_t vk21x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi21x01234567, vk21x01234567); const float16x8_t vi22x01234567 = vreinterpretq_f16_u16(vld1q_u16(i22)); const float16x8_t vk22x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi22x01234567, vk22x01234567); const float16x8_t vi23x01234567 = vreinterpretq_f16_u16(vld1q_u16(i23)); const float16x8_t vk23x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi23x01234567, vk23x01234567); const float16x8_t vi24x01234567 = vreinterpretq_f16_u16(vld1q_u16(i24)); const float16x8_t vk24x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi24x01234567, vk24x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); 
vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1; } } output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
20047
48.501235
89
c
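The acc2 depthwise kernel above alternates its fused multiply-adds between two accumulators, vacc01234567p0 and vacc01234567p1, and merges them once at the end ("Add up all accumulators"). A scalar sketch of that pattern for a single channel of the 25-tap filter: even taps feed one accumulator, odd taps the other, roughly halving each FMA dependency chain. The function name and the float types are illustrative simplifications of the fp16 kernel.

/* Two-accumulator 25-tap dot product with bias and clamping,
 * mirroring the even/odd tap split of the acc2 variant. */
static float dwconv25_acc2_ref(const float i[25], const float k[25],
                               float bias, float min, float max) {
  float acc0 = bias + i[0] * k[0];  /* bias rides on accumulator 0 */
  float acc1 = i[1] * k[1];
  for (int t = 2; t < 25; t += 2) {
    acc0 += i[t] * k[t];            /* even taps -> acc0 */
  }
  for (int t = 3; t < 25; t += 2) {
    acc1 += i[t] * k[t];            /* odd taps -> acc1 */
  }
  float out = acc0 + acc1;          /* add up all accumulators */
  out = out < min ? min : out;
  return out > max ? max : out;
}

The non-acc2 variant that follows keeps everything in a single accumulator; the acc2 split trades one extra add at the end for more instruction-level parallelism inside the loop.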
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-25p8c-minmax-neonfp16arith.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/unipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> void xnn_f16_dwconv_minmax_ukernel_25p8c__neonfp16arith( size_t channels, size_t output_width, const void** input, const void* weights, void* output_ptr, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); uint16_t* output = (uint16_t*) output_ptr; const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = (const uint16_t*) input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = (const uint16_t*) input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = (const uint16_t*) input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != (const uint16_t*) zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = (const uint16_t*) input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != (const uint16_t*) zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = (const uint16_t*) input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != (const uint16_t*) zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = (const uint16_t*) input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != (const uint16_t*) zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = (const uint16_t*) input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != (const uint16_t*) zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } const uint16_t* i8 = (const uint16_t*) input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != (const uint16_t*) zero) { i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset); } const uint16_t* i9 = (const uint16_t*) input[9]; assert(i9 != NULL); if XNN_UNPREDICTABLE(i9 != (const uint16_t*) zero) { i9 = (const uint16_t*) ((uintptr_t) i9 + input_offset); } const uint16_t* i10 = (const uint16_t*) input[10]; assert(i10 != NULL); if XNN_UNPREDICTABLE(i10 != (const uint16_t*) zero) { i10 = (const uint16_t*) ((uintptr_t) i10 + input_offset); } const uint16_t* i11 = (const uint16_t*) input[11]; assert(i11 != NULL); if XNN_UNPREDICTABLE(i11 != (const uint16_t*) zero) { i11 = (const uint16_t*) ((uintptr_t) i11 + input_offset); } const uint16_t* i12 = (const uint16_t*) input[12]; assert(i12 != NULL); if XNN_UNPREDICTABLE(i12 != (const uint16_t*) zero) { i12 = (const uint16_t*) ((uintptr_t) i12 + input_offset); } const uint16_t* i13 = (const uint16_t*) input[13]; assert(i13 != NULL); if XNN_UNPREDICTABLE(i13 != (const uint16_t*) zero) { i13 = (const uint16_t*) ((uintptr_t) i13 + input_offset); } const uint16_t* i14 = (const uint16_t*) input[14]; 
assert(i14 != NULL); if XNN_UNPREDICTABLE(i14 != (const uint16_t*) zero) { i14 = (const uint16_t*) ((uintptr_t) i14 + input_offset); } const uint16_t* i15 = (const uint16_t*) input[15]; assert(i15 != NULL); if XNN_UNPREDICTABLE(i15 != (const uint16_t*) zero) { i15 = (const uint16_t*) ((uintptr_t) i15 + input_offset); } const uint16_t* i16 = (const uint16_t*) input[16]; assert(i16 != NULL); if XNN_UNPREDICTABLE(i16 != (const uint16_t*) zero) { i16 = (const uint16_t*) ((uintptr_t) i16 + input_offset); } const uint16_t* i17 = (const uint16_t*) input[17]; assert(i17 != NULL); if XNN_UNPREDICTABLE(i17 != (const uint16_t*) zero) { i17 = (const uint16_t*) ((uintptr_t) i17 + input_offset); } const uint16_t* i18 = (const uint16_t*) input[18]; assert(i18 != NULL); if XNN_UNPREDICTABLE(i18 != (const uint16_t*) zero) { i18 = (const uint16_t*) ((uintptr_t) i18 + input_offset); } const uint16_t* i19 = (const uint16_t*) input[19]; assert(i19 != NULL); if XNN_UNPREDICTABLE(i19 != (const uint16_t*) zero) { i19 = (const uint16_t*) ((uintptr_t) i19 + input_offset); } const uint16_t* i20 = (const uint16_t*) input[20]; assert(i20 != NULL); if XNN_UNPREDICTABLE(i20 != (const uint16_t*) zero) { i20 = (const uint16_t*) ((uintptr_t) i20 + input_offset); } const uint16_t* i21 = (const uint16_t*) input[21]; assert(i21 != NULL); if XNN_UNPREDICTABLE(i21 != (const uint16_t*) zero) { i21 = (const uint16_t*) ((uintptr_t) i21 + input_offset); } const uint16_t* i22 = (const uint16_t*) input[22]; assert(i22 != NULL); if XNN_UNPREDICTABLE(i22 != (const uint16_t*) zero) { i22 = (const uint16_t*) ((uintptr_t) i22 + input_offset); } const uint16_t* i23 = (const uint16_t*) input[23]; assert(i23 != NULL); if XNN_UNPREDICTABLE(i23 != (const uint16_t*) zero) { i23 = (const uint16_t*) ((uintptr_t) i23 + input_offset); } const uint16_t* i24 = (const uint16_t*) input[24]; assert(i24 != NULL); if XNN_UNPREDICTABLE(i24 != (const uint16_t*) zero) { i24 = (const uint16_t*) ((uintptr_t) i24 + input_offset); } input = (const void**) ((uintptr_t) input + input_stride); size_t c = channels; const uint16_t* w = (const uint16_t*) weights; for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = 
vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567);
      const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
      const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567);
      const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8;
      const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567);
      const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8;
      const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567);
      const float16x8_t vi9x01234567 = vreinterpretq_f16_u16(vld1q_u16(i9)); i9 += 8;
      const float16x8_t vk9x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi9x01234567, vk9x01234567);
      const float16x8_t vi10x01234567 = vreinterpretq_f16_u16(vld1q_u16(i10)); i10 += 8;
      const float16x8_t vk10x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi10x01234567, vk10x01234567);
      const float16x8_t vi11x01234567 = vreinterpretq_f16_u16(vld1q_u16(i11)); i11 += 8;
      const float16x8_t vk11x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi11x01234567, vk11x01234567);
      const float16x8_t vi12x01234567 = vreinterpretq_f16_u16(vld1q_u16(i12)); i12 += 8;
      const float16x8_t vk12x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi12x01234567, vk12x01234567);
      const float16x8_t vi13x01234567 = vreinterpretq_f16_u16(vld1q_u16(i13)); i13 += 8;
      const float16x8_t vk13x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi13x01234567, vk13x01234567);
      const float16x8_t vi14x01234567 = vreinterpretq_f16_u16(vld1q_u16(i14)); i14 += 8;
      const float16x8_t vk14x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi14x01234567, vk14x01234567);
      const float16x8_t vi15x01234567 = vreinterpretq_f16_u16(vld1q_u16(i15)); i15 += 8;
      const float16x8_t vk15x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi15x01234567, vk15x01234567);
      const float16x8_t vi16x01234567 = vreinterpretq_f16_u16(vld1q_u16(i16)); i16 += 8;
      const float16x8_t vk16x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi16x01234567, vk16x01234567);
      const float16x8_t vi17x01234567 = vreinterpretq_f16_u16(vld1q_u16(i17)); i17 += 8;
      const float16x8_t vk17x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi17x01234567, vk17x01234567);
      const float16x8_t vi18x01234567 = vreinterpretq_f16_u16(vld1q_u16(i18)); i18 += 8;
      const float16x8_t vk18x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi18x01234567, vk18x01234567);
      const float16x8_t vi19x01234567 = vreinterpretq_f16_u16(vld1q_u16(i19)); i19 += 8;
      const float16x8_t vk19x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi19x01234567, vk19x01234567);
      const float16x8_t vi20x01234567 = vreinterpretq_f16_u16(vld1q_u16(i20)); i20 += 8;
      const float16x8_t vk20x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi20x01234567, vk20x01234567);
      const float16x8_t vi21x01234567 = vreinterpretq_f16_u16(vld1q_u16(i21)); i21 += 8;
      const float16x8_t vk21x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi21x01234567, vk21x01234567);
      const float16x8_t vi22x01234567 = vreinterpretq_f16_u16(vld1q_u16(i22)); i22 += 8;
      const float16x8_t vk22x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi22x01234567, vk22x01234567);
      const float16x8_t vi23x01234567 = vreinterpretq_f16_u16(vld1q_u16(i23)); i23 += 8;
      const float16x8_t vk23x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi23x01234567, vk23x01234567);
      const float16x8_t vi24x01234567 = vreinterpretq_f16_u16(vld1q_u16(i24)); i24 += 8;
      const float16x8_t vk24x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi24x01234567, vk24x01234567);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);
      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);
      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2));
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);
      const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3));
      const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567);
      const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4));
      const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);
      const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5));
      const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567);
      const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6));
      const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567);
      const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7));
      const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567);
      const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8));
      const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567);
      const float16x8_t vi9x01234567 = vreinterpretq_f16_u16(vld1q_u16(i9));
      const float16x8_t vk9x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi9x01234567, vk9x01234567);
      const float16x8_t vi10x01234567 = vreinterpretq_f16_u16(vld1q_u16(i10));
      const float16x8_t vk10x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi10x01234567, vk10x01234567);
      const float16x8_t vi11x01234567 = vreinterpretq_f16_u16(vld1q_u16(i11));
      const float16x8_t vk11x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi11x01234567, vk11x01234567);
      const float16x8_t vi12x01234567 = vreinterpretq_f16_u16(vld1q_u16(i12));
      const float16x8_t vk12x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi12x01234567, vk12x01234567);
      const float16x8_t vi13x01234567 = vreinterpretq_f16_u16(vld1q_u16(i13));
      const float16x8_t vk13x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi13x01234567, vk13x01234567);
      const float16x8_t vi14x01234567 = vreinterpretq_f16_u16(vld1q_u16(i14));
      const float16x8_t vk14x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi14x01234567, vk14x01234567);
      const float16x8_t vi15x01234567 = vreinterpretq_f16_u16(vld1q_u16(i15));
      const float16x8_t vk15x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi15x01234567, vk15x01234567);
      const float16x8_t vi16x01234567 = vreinterpretq_f16_u16(vld1q_u16(i16));
      const float16x8_t vk16x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi16x01234567, vk16x01234567);
      const float16x8_t vi17x01234567 = vreinterpretq_f16_u16(vld1q_u16(i17));
      const float16x8_t vk17x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi17x01234567, vk17x01234567);
      const float16x8_t vi18x01234567 = vreinterpretq_f16_u16(vld1q_u16(i18));
      const float16x8_t vk18x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi18x01234567, vk18x01234567);
      const float16x8_t vi19x01234567 = vreinterpretq_f16_u16(vld1q_u16(i19));
      const float16x8_t vk19x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi19x01234567, vk19x01234567);
      const float16x8_t vi20x01234567 = vreinterpretq_f16_u16(vld1q_u16(i20));
      const float16x8_t vk20x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi20x01234567, vk20x01234567);
      const float16x8_t vi21x01234567 = vreinterpretq_f16_u16(vld1q_u16(i21));
      const float16x8_t vk21x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi21x01234567, vk21x01234567);
      const float16x8_t vi22x01234567 = vreinterpretq_f16_u16(vld1q_u16(i22));
      const float16x8_t vk22x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi22x01234567, vk22x01234567);
      const float16x8_t vi23x01234567 = vreinterpretq_f16_u16(vld1q_u16(i23));
      const float16x8_t vk23x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi23x01234567, vk23x01234567);
      const float16x8_t vi24x01234567 = vreinterpretq_f16_u16(vld1q_u16(i24));
      const float16x8_t vk24x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi24x01234567, vk24x01234567);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      float16x4_t vacc0123 = vget_low_f16(vacc01234567);
      if (c & 4) {
        vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4;
        vacc0123 = vget_high_f16(vacc01234567);
      }
      if (c & 2) {
        vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2;
        vacc0123 = vext_f16(vacc0123, vacc0123, 2);
      }
      if (c & 1) {
        vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1;
      }
    }

    output = (uint16_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
19,816
48.418953
89
c
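The 25-tap kernel that ends above repeats one load/reinterpret/FMA pattern per tap. As a reading aid, here is a minimal scalar sketch of what it computes for one output pixel, assuming a single 8-channel group so that the packed weights reduce to bias[ch] followed by k0[ch] through k24[ch], the order in which the vector code walks w. float stands in for __fp16 for portability, and the helper name is illustrative, not part of XNNPACK.

#include <stddef.h>

// Scalar reference: out[c] = clamp(bias[c] + sum_t input[t][c] * k[t][c]).
static void dwconv_25tap_ref(size_t channels, const float* input[25],
                             const float* weights,  // bias block, then 25 tap blocks
                             float* output, float output_min, float output_max) {
  for (size_t c = 0; c < channels; c++) {
    float acc = weights[c];  // bias
    for (size_t t = 0; t < 25; t++) {
      acc += input[t][c] * weights[(t + 1) * channels + c];
    }
    acc = acc < output_min ? output_min : acc;  // same clamp as vmax/vmin above
    acc = acc > output_max ? output_max : acc;
    output[c] = acc;
  }
}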
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-3p16c-minmax-neonfp16arith-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f16-dwconv/unipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


void xnn_f16_dwconv_minmax_ukernel_3p16c__neonfp16arith_acc2(
    size_t channels,
    size_t output_width,
    const void** input,
    const void* weights,
    void* output_ptr,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const void* zero,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  uint16_t* output = (uint16_t*) output_ptr;
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
  do {
    const uint16_t* i0 = (const uint16_t*) input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) {
      i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint16_t* i1 = (const uint16_t*) input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) {
      i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint16_t* i2 = (const uint16_t*) input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) {
      i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
    }

    input = (const void**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const uint16_t* w = (const uint16_t*) weights;
    for (; c >= 16; c -= 16) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);
      float16x8_t vacc89ABCDEFp1 = vmulq_f16(vi1x89ABCDEF, vk1x89ABCDEF);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF);

      // Add up all accumulators to vacc0123456789ABCDEFp0
      vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);
      vacc89ABCDEFp0 = vaddq_f16(vacc89ABCDEFp0, vacc89ABCDEFp1);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      float16x8_t vacc89ABCDEF = vmaxq_f16(vacc89ABCDEFp0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);
      vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
      vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output += 8;
    }
    for (; c >= 8; c -= 8) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 8));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 24));
      float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 40));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      // Add up all accumulators to vacc01234567p0
      vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w));

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 16));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 32));
      float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2));
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 48));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      // Add up all accumulators to vacc01234567p0
      vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      float16x4_t vacc0123 = vget_low_f16(vacc01234567);
      if (c & 4) {
        vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4;
        vacc0123 = vget_high_f16(vacc01234567);
      }
      if (c & 2) {
        vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2;
        vacc0123 = vext_f16(vacc0123, vacc0123, 2);
      }
      if (c & 1) {
        vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1;
      }
    }

    output = (uint16_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
6,824
42.471338
89
c
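The main loop of the 3p16c kernel above consumes w as bias[16], k0[16], k1[16], k2[16] per 16-channel group. A hypothetical packing helper consistent with that read order is sketched below; the name and signature are illustrative only (real XNNPACK has its own packing routines, which also pad the final partial group), and channels is assumed to be a multiple of 16.

#include <stddef.h>
#include <stdint.h>

// Interleave bias and 3 taps into 16-channel groups, matching the loads above.
static void pack_dwconv_3p16c(size_t channels,
                              const uint16_t* bias,    // [channels]
                              const uint16_t* kernel,  // [3][channels]
                              uint16_t* packed) {
  for (size_t g = 0; g < channels; g += 16) {
    for (size_t c = 0; c < 16; c++) *packed++ = bias[g + c];
    for (size_t t = 0; t < 3; t++) {
      for (size_t c = 0; c < 16; c++) *packed++ = kernel[t * channels + g + c];
    }
  }
}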
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-3p16c-minmax-neonfp16arith.c
// Auto-generated file. Do not edit!
// Template: src/f16-dwconv/unipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


void xnn_f16_dwconv_minmax_ukernel_3p16c__neonfp16arith(
    size_t channels,
    size_t output_width,
    const void** input,
    const void* weights,
    void* output_ptr,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const void* zero,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  uint16_t* output = (uint16_t*) output_ptr;
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
  do {
    const uint16_t* i0 = (const uint16_t*) input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) {
      i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint16_t* i1 = (const uint16_t*) input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) {
      i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint16_t* i2 = (const uint16_t*) input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) {
      i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
    }

    input = (const void**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const uint16_t* w = (const uint16_t*) weights;
    for (; c >= 16; c -= 16) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi1x89ABCDEF, vk1x89ABCDEF);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      float16x8_t vacc89ABCDEF = vmaxq_f16(vacc89ABCDEFp0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);
      vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
      vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output += 8;
    }
    for (; c >= 8; c -= 8) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 8));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 24));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 40));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w));

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 16));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 32));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2));
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 48));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      float16x4_t vacc0123 = vget_low_f16(vacc01234567);
      if (c & 4) {
        vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4;
        vacc0123 = vget_high_f16(vacc01234567);
      }
      if (c & 2) {
        vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2;
        vacc0123 = vext_f16(vacc0123, vacc0123, 2);
      }
      if (c & 1) {
        vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1;
      }
    }

    output = (uint16_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
6,410
41.74
89
c
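The only difference between this file and the _acc2 variant before it is the accumulation scheme: _acc2 routes tap 1 into a second partial accumulator (started with vmulq_f16) and adds the two partials at the end, so consecutive FMAs do not all sit on one dependency chain, while the plain variant chains every tap through a single accumulator. A scalar equivalent of the two schemes, shown only to make the difference concrete:

// Two partial sums, merged at the end ("Add up all accumulators" in the code above).
float acc2_style(float bias, const float i[3], const float k[3]) {
  float p0 = bias + i[0] * k[0];  // accumulator 0 takes bias, tap 0, tap 2
  float p1 = i[1] * k[1];         // accumulator 1 starts with a plain multiply
  p0 += i[2] * k[2];
  return p0 + p1;
}

// One accumulator, one serial dependency chain.
float plain_style(float bias, const float i[3], const float k[3]) {
  float acc = bias;
  for (int t = 0; t < 3; t++) acc += i[t] * k[t];
  return acc;
}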
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-3p32c-minmax-neonfp16arith-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f16-dwconv/unipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


void xnn_f16_dwconv_minmax_ukernel_3p32c__neonfp16arith_acc2(
    size_t channels,
    size_t output_width,
    const void** input,
    const void* weights,
    void* output_ptr,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const void* zero,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  uint16_t* output = (uint16_t*) output_ptr;
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
  do {
    const uint16_t* i0 = (const uint16_t*) input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) {
      i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint16_t* i1 = (const uint16_t*) input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) {
      i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint16_t* i2 = (const uint16_t*) input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) {
      i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
    }

    input = (const void**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const uint16_t* w = (const uint16_t*) weights;
    for (; c >= 32; c -= 32) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vaccGHIJKLMNp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vaccOPQRSTUVp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vi0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF);
      vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi0xGHIJKLMN, vk0xGHIJKLMN);
      vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi0xOPQRSTUV, vk0xOPQRSTUV);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vi1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);
      float16x8_t vacc89ABCDEFp1 = vmulq_f16(vi1x89ABCDEF, vk1x89ABCDEF);
      float16x8_t vaccGHIJKLMNp1 = vmulq_f16(vi1xGHIJKLMN, vk1xGHIJKLMN);
      float16x8_t vaccOPQRSTUVp1 = vmulq_f16(vi1xOPQRSTUV, vk1xOPQRSTUV);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vi2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF);
      vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi2xGHIJKLMN, vk2xGHIJKLMN);
      vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi2xOPQRSTUV, vk2xOPQRSTUV);

      // Add up all accumulators to vacc0123456789ABCDEFGHIJKLMNOPQRSTUVp0
      vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);
      vacc89ABCDEFp0 = vaddq_f16(vacc89ABCDEFp0, vacc89ABCDEFp1);
      vaccGHIJKLMNp0 = vaddq_f16(vaccGHIJKLMNp0, vaccGHIJKLMNp1);
      vaccOPQRSTUVp0 = vaddq_f16(vaccOPQRSTUVp0, vaccOPQRSTUVp1);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      float16x8_t vacc89ABCDEF = vmaxq_f16(vacc89ABCDEFp0, vmin);
      float16x8_t vaccGHIJKLMN = vmaxq_f16(vaccGHIJKLMNp0, vmin);
      float16x8_t vaccOPQRSTUV = vmaxq_f16(vaccOPQRSTUVp0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);
      vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
      vaccGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax);
      vaccOPQRSTUV = vminq_f16(vaccOPQRSTUV, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
      vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output += 8;
      vst1q_u16(output, vreinterpretq_u16_f16(vaccGHIJKLMN)); output += 8;
      vst1q_u16(output, vreinterpretq_u16_f16(vaccOPQRSTUV)); output += 8;
    }
    for (; c >= 8; c -= 8) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 24));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 56));
      float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 88));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      // Add up all accumulators to vacc01234567p0
      vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w));

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 32));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 64));
      float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2));
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 96));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      // Add up all accumulators to vacc01234567p0
      vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      float16x4_t vacc0123 = vget_low_f16(vacc01234567);
      if (c & 4) {
        vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4;
        vacc0123 = vget_high_f16(vacc01234567);
      }
      if (c & 2) {
        vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2;
        vacc0123 = vext_f16(vacc0123, vacc0123, 2);
      }
      if (c & 1) {
        vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1;
      }
    }

    output = (uint16_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
8,999
47.648649
89
c
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-3p32c-minmax-neonfp16arith.c
// Auto-generated file. Do not edit!
// Template: src/f16-dwconv/unipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


void xnn_f16_dwconv_minmax_ukernel_3p32c__neonfp16arith(
    size_t channels,
    size_t output_width,
    const void** input,
    const void* weights,
    void* output_ptr,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const void* zero,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  uint16_t* output = (uint16_t*) output_ptr;
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
  do {
    const uint16_t* i0 = (const uint16_t*) input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) {
      i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint16_t* i1 = (const uint16_t*) input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) {
      i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint16_t* i2 = (const uint16_t*) input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) {
      i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
    }

    input = (const void**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const uint16_t* w = (const uint16_t*) weights;
    for (; c >= 32; c -= 32) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vaccGHIJKLMNp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vaccOPQRSTUVp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vi0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF);
      vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi0xGHIJKLMN, vk0xGHIJKLMN);
      vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi0xOPQRSTUV, vk0xOPQRSTUV);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vi1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi1x89ABCDEF, vk1x89ABCDEF);
      vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi1xGHIJKLMN, vk1xGHIJKLMN);
      vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi1xOPQRSTUV, vk1xOPQRSTUV);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vi2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF);
      vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi2xGHIJKLMN, vk2xGHIJKLMN);
      vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi2xOPQRSTUV, vk2xOPQRSTUV);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      float16x8_t vacc89ABCDEF = vmaxq_f16(vacc89ABCDEFp0, vmin);
      float16x8_t vaccGHIJKLMN = vmaxq_f16(vaccGHIJKLMNp0, vmin);
      float16x8_t vaccOPQRSTUV = vmaxq_f16(vaccOPQRSTUVp0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);
      vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);
      vaccGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax);
      vaccOPQRSTUV = vminq_f16(vaccOPQRSTUV, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
      vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output += 8;
      vst1q_u16(output, vreinterpretq_u16_f16(vaccGHIJKLMN)); output += 8;
      vst1q_u16(output, vreinterpretq_u16_f16(vaccOPQRSTUV)); output += 8;
    }
    for (; c >= 8; c -= 8) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 24));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 56));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 88));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w));

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 32));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 64));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2));
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 96));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      float16x4_t vacc0123 = vget_low_f16(vacc01234567);
      if (c & 4) {
        vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4;
        vacc0123 = vget_high_f16(vacc01234567);
      }
      if (c & 2) {
        vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2;
        vacc0123 = vext_f16(vacc0123, vacc0123, 2);
      }
      if (c & 1) {
        vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1;
      }
    }

    output = (uint16_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
8,445
46.988636
89
c
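A detail worth noting in the two 32-channel kernels above: their c >= 8 and c != 0 sub-tile paths read taps at fixed offsets (w + 24, w + 56, w + 88 and w + 32, w + 64, w + 96) rather than walking w. This falls out of the bias[32] k0[32] k1[32] k2[32] group layout: after consuming the first 8 bias lanes, the first 8 lanes of tap t sit past the remaining bias lanes plus t full tap blocks. A tiny sketch of the arithmetic (helper name is illustrative):

#include <stddef.h>

// Offset of the first 8 lanes of tap `tap` within a packed group of `tile`
// channels, after `consumed` bias lanes have already been read.
static size_t subtile_tap_offset(size_t tile, size_t consumed, size_t tap) {
  return (tile - consumed) + tap * tile;
}
// tile=32, consumed=8: taps 0..2 -> 24, 56, 88   (the c >= 8 loop above)
// tile=32, consumed=0: taps 0..2 -> 32, 64, 96   (the c != 0 tail above)
// tile=16, consumed=8: taps 0..2 ->  8, 24, 40   (matches the 16c kernels)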
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-3p8c-minmax-neonfp16arith-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f16-dwconv/unipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


void xnn_f16_dwconv_minmax_ukernel_3p8c__neonfp16arith_acc2(
    size_t channels,
    size_t output_width,
    const void** input,
    const void* weights,
    void* output_ptr,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const void* zero,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  uint16_t* output = (uint16_t*) output_ptr;
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
  do {
    const uint16_t* i0 = (const uint16_t*) input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) {
      i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint16_t* i1 = (const uint16_t*) input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) {
      i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint16_t* i2 = (const uint16_t*) input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) {
      i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
    }

    input = (const void**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const uint16_t* w = (const uint16_t*) weights;
    for (; c >= 8; c -= 8) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      // Add up all accumulators to vacc01234567p0
      vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2));
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      // Add up all accumulators to vacc01234567p0
      vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      float16x4_t vacc0123 = vget_low_f16(vacc01234567);
      if (c & 4) {
        vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4;
        vacc0123 = vget_high_f16(vacc01234567);
      }
      if (c & 2) {
        vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2;
        vacc0123 = vext_f16(vacc0123, vacc0123, 2);
      }
      if (c & 1) {
        vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1;
      }
    }

    output = (uint16_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
4,589
37.571429
89
c
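Every kernel in this family stores the final c < 8 half-floats with the same 4/2/1 cascade (vst1_u16, vst1_lane_u32, vst1_lane_u16 plus a vget_high/vext shuffle) instead of a scalar loop. A scalar sketch of the equivalent pointer arithmetic, with v holding the 8 clamped lanes as raw uint16 bits:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Store the first c (< 8) elements of v: a 4-element chunk, then a 2-element
// chunk, then a single element, tracking the read position just like the
// vget_high_f16/vext_f16 steps do in the NEON tail above.
static void store_tail(uint16_t* out, const uint16_t v[8], size_t c) {
  size_t pos = 0;
  if (c & 4) { memcpy(out, v + pos, 4 * sizeof(uint16_t)); out += 4; pos += 4; }
  if (c & 2) { memcpy(out, v + pos, 2 * sizeof(uint16_t)); out += 2; pos += 2; }
  if (c & 1) { *out = v[pos]; }
}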
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-3p8c-minmax-neonfp16arith.c
// Auto-generated file. Do not edit!
// Template: src/f16-dwconv/unipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


void xnn_f16_dwconv_minmax_ukernel_3p8c__neonfp16arith(
    size_t channels,
    size_t output_width,
    const void** input,
    const void* weights,
    void* output_ptr,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const void* zero,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  uint16_t* output = (uint16_t*) output_ptr;
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
  do {
    const uint16_t* i0 = (const uint16_t*) input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) {
      i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint16_t* i1 = (const uint16_t*) input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) {
      i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint16_t* i2 = (const uint16_t*) input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) {
      i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
    }

    input = (const void**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const uint16_t* w = (const uint16_t*) weights;
    for (; c >= 8; c -= 8) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2));
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      float16x4_t vacc0123 = vget_low_f16(vacc01234567);
      if (c & 4) {
        vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4;
        vacc0123 = vget_high_f16(vacc01234567);
      }
      if (c & 2) {
        vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2;
        vacc0123 = vext_f16(vacc0123, vacc0123, 2);
      }
      if (c & 1) {
        vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1;
      }
    }

    output = (uint16_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
4,358
36.904348
89
c
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-4p16c-minmax-neonfp16arith-acc2.c
// Auto-generated file. Do not edit!
// Template: src/f16-dwconv/unipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


void xnn_f16_dwconv_minmax_ukernel_4p16c__neonfp16arith_acc2(
    size_t channels,
    size_t output_width,
    const void** input,
    const void* weights,
    void* output_ptr,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const void* zero,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  uint16_t* output = (uint16_t*) output_ptr;
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
  do {
    const uint16_t* i0 = (const uint16_t*) input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) {
      i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint16_t* i1 = (const uint16_t*) input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) {
      i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint16_t* i2 = (const uint16_t*) input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) {
      i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint16_t* i3 = (const uint16_t*) input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != (const uint16_t*) zero) {
      i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
    }

    input = (const void**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const uint16_t* w = (const uint16_t*) weights;
    for (; c >= 16; c -= 16) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);
      float16x8_t vacc89ABCDEFp1 = vmulq_f16(vi1x89ABCDEF, vk1x89ABCDEF);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF);

      const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
      const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
      const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);
      vacc89ABCDEFp1 = vfmaq_f16(vacc89ABCDEFp1, vi3x89ABCDEF, vk3x89ABCDEF);

      // Add up all accumulators to vacc0123456789ABCDEFp0
      vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);
      vacc89ABCDEFp0 = vaddq_f16(vacc89ABCDEFp0, vacc89ABCDEFp1);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      float16x8_t vacc89ABCDEF = vmaxq_f16(vacc89ABCDEFp0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);
      vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
      vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output += 8;
    }
    for (; c >= 8; c -= 8) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 8));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 24));
      float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 40));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
      const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 56));
      vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

      // Add up all accumulators to vacc01234567p0
      vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w));

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 16));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 32));
      float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2));
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 48));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3));
      const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 64));
      vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

      // Add up all accumulators to vacc01234567p0
      vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      float16x4_t vacc0123 = vget_low_f16(vacc01234567);
      if (c & 4) {
        vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4;
        vacc0123 = vget_high_f16(vacc01234567);
      }
      if (c & 2) {
        vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2;
        vacc0123 = vext_f16(vacc0123, vacc0123, 2);
      }
      if (c & 1) {
        vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1;
      }
    }

    output = (uint16_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
8,006
44.237288
89
c
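The 4-tap kernels take their input through an indirection buffer: one row pointer per tap per output pixel, with out-of-bounds taps pointed at the shared `zero` buffer. The kernels then skip adding input_offset to exactly those entries (the i != zero test above), so the same indirection buffer can be reused while only input_offset changes. A hypothetical setup for one output pixel at the top edge of an image; all names here are illustrative, not XNNPACK API:

#include <stddef.h>
#include <stdint.h>

static const uint16_t zero_buf[16];  // zero-initialized shared "row"

// Fill one 4-entry indirection slot: the tap above the image reads zeros.
void setup_indirection(const uint16_t* image, size_t row_stride,
                       const void* indirection[4]) {
  indirection[0] = zero_buf;              // tap 0 falls outside the image
  indirection[1] = image;                 // tap 1: row 0
  indirection[2] = image + row_stride;    // tap 2: row 1
  indirection[3] = image + 2 * row_stride;  // tap 3: row 2
}
// `indirection` is then passed as `input` and zero_buf as `zero`.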
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-4p16c-minmax-neonfp16arith.c
// Auto-generated file. Do not edit!
// Template: src/f16-dwconv/unipass-neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


void xnn_f16_dwconv_minmax_ukernel_4p16c__neonfp16arith(
    size_t channels,
    size_t output_width,
    const void** input,
    const void* weights,
    void* output_ptr,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const void* zero,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  uint16_t* output = (uint16_t*) output_ptr;
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
  do {
    const uint16_t* i0 = (const uint16_t*) input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) {
      i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint16_t* i1 = (const uint16_t*) input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) {
      i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint16_t* i2 = (const uint16_t*) input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) {
      i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint16_t* i3 = (const uint16_t*) input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != (const uint16_t*) zero) {
      i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
    }

    input = (const void**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const uint16_t* w = (const uint16_t*) weights;
    for (; c >= 16; c -= 16) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi1x89ABCDEF, vk1x89ABCDEF);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF);

      const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
      const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
      const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      const float16x8_t vk3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567);
      vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi3x89ABCDEF, vk3x89ABCDEF);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      float16x8_t vacc89ABCDEF = vmaxq_f16(vacc89ABCDEFp0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);
      vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
      vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output += 8;
    }
    for (; c >= 8; c -= 8) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 8));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 24));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 40));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
      const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 56));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w));

      const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
      const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 16));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

      const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
      const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 32));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);

      const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2));
      const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 48));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

      const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3));
      const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 64));
      vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567);

      float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

      float16x4_t vacc0123 = vget_low_f16(vacc01234567);
      if (c & 4) {
        vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4;
        vacc0123 = vget_high_f16(vacc01234567);
      }
      if (c & 2) {
        vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2;
        vacc0123 = vext_f16(vacc0123, vacc0123, 2);
      }
      if (c & 1) {
        vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1;
      }
    }

    output = (uint16_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
7,592
43.664706
89
c
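The `c != 0` tail of the 4p16c kernel above stores the final 1-7 fp16 values with a cascade of narrowing stores (4, then 2, then 1 lane). A minimal standalone sketch of that pattern, assuming a compiler targeting ARMv8.2-A with the +fp16 extension; the helper name store_f16_tail is illustrative, not an XNNPACK symbol:

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

static void store_f16_tail(uint16_t* output, float16x8_t vacc, size_t c) {
  float16x4_t vacc0123 = vget_low_f16(vacc);
  if (c & 4) {
    // Store 4 halves, then continue with the upper half of the vector.
    vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4;
    vacc0123 = vget_high_f16(vacc);
  }
  if (c & 2) {
    // Store 2 halves as one 32-bit lane, then rotate them out.
    vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2;
    vacc0123 = vext_f16(vacc0123, vacc0123, 2);
  }
  if (c & 1) {
    vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0);
  }
}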
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-4p32c-minmax-neonfp16arith-acc2.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/unipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> void xnn_f16_dwconv_minmax_ukernel_4p32c__neonfp16arith_acc2( size_t channels, size_t output_width, const void** input, const void* weights, void* output_ptr, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); uint16_t* output = (uint16_t*) output_ptr; const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = (const uint16_t*) input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = (const uint16_t*) input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = (const uint16_t*) input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != (const uint16_t*) zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } input = (const void**) ((uintptr_t) input + input_stride); size_t c = channels; const uint16_t* w = (const uint16_t*) weights; for (; c >= 32; c -= 32) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vaccGHIJKLMNp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vaccOPQRSTUVp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi0xGHIJKLMN, vk0xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi0xOPQRSTUV, vk0xOPQRSTUV); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t 
vk1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); float16x8_t vacc89ABCDEFp1 = vmulq_f16(vi1x89ABCDEF, vk1x89ABCDEF); float16x8_t vaccGHIJKLMNp1 = vmulq_f16(vi1xGHIJKLMN, vk1xGHIJKLMN); float16x8_t vaccOPQRSTUVp1 = vmulq_f16(vi1xOPQRSTUV, vk1xOPQRSTUV); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi2xGHIJKLMN, vk2xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi2xOPQRSTUV, vk2xOPQRSTUV); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); vacc89ABCDEFp1 = vfmaq_f16(vacc89ABCDEFp1, vi3x89ABCDEF, vk3x89ABCDEF); vaccGHIJKLMNp1 = vfmaq_f16(vaccGHIJKLMNp1, vi3xGHIJKLMN, vk3xGHIJKLMN); vaccOPQRSTUVp1 = vfmaq_f16(vaccOPQRSTUVp1, vi3xOPQRSTUV, vk3xOPQRSTUV); // Add up all accumulators to vacc0123456789ABCDEFGHIJKLMNOPQRSTUVp0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); vacc89ABCDEFp0 = vaddq_f16(vacc89ABCDEFp0, vacc89ABCDEFp1); vaccGHIJKLMNp0 = vaddq_f16(vaccGHIJKLMNp0, vaccGHIJKLMNp1); vaccOPQRSTUVp0 = vaddq_f16(vaccOPQRSTUVp0, vaccOPQRSTUVp1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); float16x8_t vacc89ABCDEF = vmaxq_f16(vacc89ABCDEFp0, vmin); float16x8_t vaccGHIJKLMN = vmaxq_f16(vaccGHIJKLMNp0, vmin); float16x8_t vaccOPQRSTUV = vmaxq_f16(vaccOPQRSTUVp0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax); vaccGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax); vaccOPQRSTUV = vminq_f16(vaccOPQRSTUV, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output += 8; vst1q_u16(output, vreinterpretq_u16_f16(vaccGHIJKLMN)); output += 8; vst1q_u16(output, vreinterpretq_u16_f16(vaccOPQRSTUV)); output += 8; } for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 24)); vacc01234567p0 = 
vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 56)); float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 88)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 120)); vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 32)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 64)); float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 96)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 128)); vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1; } } output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
10,679
49.616114
89
c
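This `_acc2` variant differs from the plain kernel only in how taps are accumulated: even taps FMA into accumulator p0, tap 1 opens a second accumulator p1 with a plain multiply, odd taps FMA into p1, and the two partial sums are added once at the end, shortening the FMA dependency chain. A hedged sketch of that pattern for one 8-wide vector, again assuming an fp16-capable AArch64 compiler; the function is illustrative:

#include <arm_neon.h>

static float16x8_t dwconv4_acc2(float16x8_t vbias,
                                float16x8_t vi0, float16x8_t vk0,
                                float16x8_t vi1, float16x8_t vk1,
                                float16x8_t vi2, float16x8_t vk2,
                                float16x8_t vi3, float16x8_t vk3) {
  float16x8_t vaccp0 = vfmaq_f16(vbias, vi0, vk0);  // taps 0 and 2 go to p0
  float16x8_t vaccp1 = vmulq_f16(vi1, vk1);         // tap 1 opens p1
  vaccp0 = vfmaq_f16(vaccp0, vi2, vk2);
  vaccp1 = vfmaq_f16(vaccp1, vi3, vk3);             // tap 3 goes to p1
  return vaddq_f16(vaccp0, vaccp1);                 // add up all accumulators
}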
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-4p32c-minmax-neonfp16arith.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/unipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> void xnn_f16_dwconv_minmax_ukernel_4p32c__neonfp16arith( size_t channels, size_t output_width, const void** input, const void* weights, void* output_ptr, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); uint16_t* output = (uint16_t*) output_ptr; const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = (const uint16_t*) input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = (const uint16_t*) input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = (const uint16_t*) input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != (const uint16_t*) zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } input = (const void**) ((uintptr_t) input + input_stride); size_t c = channels; const uint16_t* w = (const uint16_t*) weights; for (; c >= 32; c -= 32) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vaccGHIJKLMNp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vaccOPQRSTUVp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi0xGHIJKLMN, vk0xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi0xOPQRSTUV, vk0xOPQRSTUV); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1xGHIJKLMN = 
vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi1x89ABCDEF, vk1x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi1xGHIJKLMN, vk1xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi1xOPQRSTUV, vk1xOPQRSTUV); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi2xGHIJKLMN, vk2xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi2xOPQRSTUV, vk2xOPQRSTUV); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi3x89ABCDEF, vk3x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi3xGHIJKLMN, vk3xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi3xOPQRSTUV, vk3xOPQRSTUV); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); float16x8_t vacc89ABCDEF = vmaxq_f16(vacc89ABCDEFp0, vmin); float16x8_t vaccGHIJKLMN = vmaxq_f16(vaccGHIJKLMNp0, vmin); float16x8_t vaccOPQRSTUV = vmaxq_f16(vaccOPQRSTUVp0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax); vaccGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax); vaccOPQRSTUV = vminq_f16(vaccOPQRSTUV, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output += 8; vst1q_u16(output, vreinterpretq_u16_f16(vaccGHIJKLMN)); output += 8; vst1q_u16(output, vreinterpretq_u16_f16(vaccOPQRSTUV)); output += 8; } for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 24)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 56)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = 
vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 88)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 120)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 32)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 64)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 96)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 128)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1; } } output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
10,125
49.128713
89
c
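The fixed `w + 24`, `w + 56`, `w + 88`, `w + 120` offsets in the 8-channel subtile loop of the 4p32c kernels fall out of the packed weight layout: each 32-channel group is stored as [bias(32) | k0(32) | k1(32) | k2(32) | k3(32)], and `w` has already advanced past 8 bias values when the tap weights are read, so tap t sits 32*(t+1) - 8 elements ahead. A small sketch of that arithmetic; the helper name is illustrative:

#include <stddef.h>

// Offset of tap `t` relative to `w` after the subtile's 8-wide bias load,
// for a kernel with the given channel tile. Yields 24, 56, 88, 120 for
// taps 0..3 with channel_tile == 32, matching the loads above.
static inline size_t subtile_tap_offset(size_t channel_tile, size_t t) {
  return channel_tile * (t + 1) - 8;
}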
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-4p8c-minmax-neonfp16arith-acc2.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/unipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> void xnn_f16_dwconv_minmax_ukernel_4p8c__neonfp16arith_acc2( size_t channels, size_t output_width, const void** input, const void* weights, void* output_ptr, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); uint16_t* output = (uint16_t*) output_ptr; const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = (const uint16_t*) input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = (const uint16_t*) input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = (const uint16_t*) input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != (const uint16_t*) zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } input = (const void**) ((uintptr_t) input + input_stride); size_t c = channels; const uint16_t* w = (const uint16_t*) weights; for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); const float16x8_t vk1x01234567 = 
vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1; } } output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
5,280
39.007576
89
c
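Every kernel in this family applies the output bounds the same way before storing: max against vmin, then min against vmax. Both bounds arrive as raw fp16 bit patterns in `params->fp16arith`, which is why the kernels load them with `vld1q_dup_u16` and reinterpret to float16x8_t. A minimal sketch of the clamp step, under the same fp16-compiler assumption as above:

#include <arm_neon.h>

static inline float16x8_t clamp_f16x8(float16x8_t vacc,
                                      float16x8_t vmin, float16x8_t vmax) {
  vacc = vmaxq_f16(vacc, vmin);  // lower bound first
  return vminq_f16(vacc, vmax);  // then upper bound
}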
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-4p8c-minmax-neonfp16arith.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/unipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> void xnn_f16_dwconv_minmax_ukernel_4p8c__neonfp16arith( size_t channels, size_t output_width, const void** input, const void* weights, void* output_ptr, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); uint16_t* output = (uint16_t*) output_ptr; const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = (const uint16_t*) input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = (const uint16_t*) input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = (const uint16_t*) input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != (const uint16_t*) zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } input = (const void**) ((uintptr_t) input + input_stride); size_t c = channels; const uint16_t* w = (const uint16_t*) weights; for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const 
float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1; } } output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
5,049
38.453125
89
c
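The `i0 != zero` checks at the top of each kernel implement implicit padding: taps that fall outside the image point at a shared zero buffer, and `input_offset` must only be added to pointers into the real input, never to the zero buffer. A hedged sketch of that adjustment; the helper is illustrative, not an XNNPACK symbol:

#include <stddef.h>
#include <stdint.h>

static const uint16_t* adjust_input_row(const uint16_t* row,
                                        const void* zero,
                                        size_t input_offset) {
  // Padding rows alias the shared zero buffer and are used as-is;
  // real rows are shifted by the caller-supplied byte offset.
  if (row != (const uint16_t*) zero) {
    row = (const uint16_t*) ((uintptr_t) row + input_offset);
  }
  return row;
}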
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-5f5m5l16c8s4r-minmax-neonfp16arith-acc2.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/multipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/math.h> void xnn_f16_dwconv_minmax_ukernel_5f5m5l16c8s4r__neonfp16arith_acc2( size_t channels, size_t output_width, const void** input, const void* weights, void* output, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, size_t kernel_size, void* buffer, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); assert(kernel_size > 5); const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* w = weights; // First pass to process 5 inputs. { uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } input += 5; size_t c = round_up_po2(channels, 4); for (; c >= 16; c -= 16) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); float16x8_t vacc89ABCDEFp1 = vmulq_f16(vi1x89ABCDEF, vk1x89ABCDEF); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t 
vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); vacc89ABCDEFp1 = vfmaq_f16(vacc89ABCDEFp1, vi3x89ABCDEF, vk3x89ABCDEF); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi4x89ABCDEF, vk4x89ABCDEF); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); vacc89ABCDEFp0 = vaddq_f16(vacc89ABCDEFp0, vacc89ABCDEFp1); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; vst1q_u16(b, vreinterpretq_u16_f16(vacc89ABCDEFp0)); b += 8; } for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } if (c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t 
vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); // Add up all accumulators to vacc0123p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } } // Middle pass to process 5 inputs in each iteration. for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) { uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } input += 5; size_t c = round_up_po2(channels, 4); for (; c >= 16; c -= 16) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(b + 8)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); float16x8_t vacc89ABCDEFp1 = vmulq_f16(vi1x89ABCDEF, vk1x89ABCDEF); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); vacc89ABCDEFp1 = vfmaq_f16(vacc89ABCDEFp1, vi3x89ABCDEF, vk3x89ABCDEF); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk4x89ABCDEF = 
vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi4x89ABCDEF, vk4x89ABCDEF); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); vacc89ABCDEFp0 = vaddq_f16(vacc89ABCDEFp0, vacc89ABCDEFp1); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; vst1q_u16(b, vreinterpretq_u16_f16(vacc89ABCDEFp0)); b += 8; } for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } if (c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } } // Last pass to process up to 5 inputs. 
{ uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } size_t c = channels; for (; c >= 16; c -= 16) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8; float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); float16x8_t vacc89ABCDEFp1 = vmulq_f16(vi1x89ABCDEF, vk1x89ABCDEF); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vk3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); vacc89ABCDEFp1 = vfmaq_f16(vacc89ABCDEFp1, vi3x89ABCDEF, vk3x89ABCDEF); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vk4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi4x89ABCDEF, vk4x89ABCDEF); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); vacc89ABCDEFp0 = vaddq_f16(vacc89ABCDEFp0, vacc89ABCDEFp1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); float16x8_t vacc89ABCDEF = vmaxq_f16(vacc89ABCDEFp0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax); 
vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8; vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output = (uint16_t*) output + 8; } for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); // Add up all accumulators to vacc0123p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1; } } } input = (const void**) (const 
uint16_t**) ((uintptr_t) input + input_stride); output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
24,008
48.605372
108
c
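The 5f5m5l multipass kernel above splits a kernel_size > 5 depthwise convolution into a first pass of 5 taps that seeds `buffer` with bias plus partial sums, middle passes of 5 taps that keep accumulating into `buffer`, and a last pass that adds the final taps, clamps, and writes `output`. A scalar reference of the same accumulation, using float stand-ins for fp16 and a simplified [bias | taps] weight layout (an assumption for clarity -- the real kernel interleaves bias and tap weights per channel group):

#include <stddef.h>

static void dwconv_multipass_ref(size_t channels, size_t kernel_size,
                                 const float** rows, const float* weights,
                                 float* buffer, float* output,
                                 float vmin, float vmax) {
  for (size_t c = 0; c < channels; c++) {
    buffer[c] = weights[c];  // first pass seeds with the bias
  }
  const float* k = weights + channels;  // taps follow the bias in this layout
  for (size_t t = 0; t < kernel_size; t++) {  // first + middle + last taps
    for (size_t c = 0; c < channels; c++) {
      buffer[c] += rows[t][c] * k[t * channels + c];
    }
  }
  for (size_t c = 0; c < channels; c++) {  // clamping happens only in the last pass
    float v = buffer[c] < vmin ? vmin : buffer[c];
    output[c] = v < vmax ? v : vmax;
  }
}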
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-5f5m5l16c8s4r-minmax-neonfp16arith.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/multipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/math.h> void xnn_f16_dwconv_minmax_ukernel_5f5m5l16c8s4r__neonfp16arith( size_t channels, size_t output_width, const void** input, const void* weights, void* output, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, size_t kernel_size, void* buffer, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); assert(kernel_size > 5); const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* w = weights; // First pass to process 5 inputs. { uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } input += 5; size_t c = round_up_po2(channels, 4); for (; c >= 16; c -= 16) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi1x89ABCDEF, vk1x89ABCDEF); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t 
vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi3x89ABCDEF, vk3x89ABCDEF); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi4x89ABCDEF, vk4x89ABCDEF); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; vst1q_u16(b, vreinterpretq_u16_f16(vacc89ABCDEFp0)); b += 8; } for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } if (c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 
8; } } // Middle pass to process 5 inputs in each iteration. for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) { uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } input += 5; size_t c = round_up_po2(channels, 4); for (; c >= 16; c -= 16) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(b + 8)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi1x89ABCDEF, vk1x89ABCDEF); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi3x89ABCDEF, vk3x89ABCDEF); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi4x89ABCDEF, vk4x89ABCDEF); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; vst1q_u16(b, vreinterpretq_u16_f16(vacc89ABCDEFp0)); b += 8; } for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = 
vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } if (c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } } // Last pass to process up to 5 inputs. 
{ uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } size_t c = channels; for (; c >= 16; c -= 16) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8; float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi1x89ABCDEF, vk1x89ABCDEF); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vk3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi3x89ABCDEF, vk3x89ABCDEF); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vk4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi4x89ABCDEF, vk4x89ABCDEF); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); float16x8_t vacc89ABCDEF = vmaxq_f16(vacc89ABCDEFp0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8; vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output = 
(uint16_t*) output + 8; } for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1; } } } input = (const void**) (const uint16_t**) ((uintptr_t) input + input_stride); output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
22,766
48.172786
108
c
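The multipass f16 depthwise-convolution kernels in these files stage partial sums in a scratch buffer: a first pass seeds the buffer with bias plus the first block of taps, each middle pass accumulates another block in place, and a last pass adds the remaining taps and applies the min/max clamp. A minimal scalar sketch of that scheme, assuming a simplified weight layout (bias followed by contiguous per-tap rows; the real kernels tile weights in 8-channel blocks):

#include <stddef.h>

// Scalar reference model (not part of XNNPACK) of the buffered multipass
// depthwise convolution: seed with bias, accumulate one tap per step,
// clamp at the end.
static void dwconv_multipass_ref(
    size_t channels, size_t kernel_size,
    const float* const* input,  // kernel_size pointers, channels elements each
    const float* weights, float* buffer, float* output,
    float output_min, float output_max)
{
  const float* w = weights;
  for (size_t c = 0; c < channels; c++) {
    buffer[c] = w[c];  // first pass starts from the bias
  }
  w += channels;
  for (size_t k = 0; k < kernel_size; k++) {
    for (size_t c = 0; c < channels; c++) {
      buffer[c] += input[k][c] * w[c];  // accumulate one tap
    }
    w += channels;
  }
  for (size_t c = 0; c < channels; c++) {  // last pass applies min/max
    float vout = buffer[c] < output_min ? output_min : buffer[c];
    output[c] = vout > output_max ? output_max : vout;
  }
}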
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-5f5m5l8c8s4r-minmax-neonfp16arith-acc2.c
// Auto-generated file. Do not edit!
//   Template: src/f16-dwconv/multipass-neonfp16arith.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f16_dwconv_minmax_ukernel_5f5m5l8c8s4r__neonfp16arith_acc2(
    size_t channels,
    size_t output_width,
    const void** input,
    const void* weights,
    void* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const void* zero,
    size_t kernel_size,
    void* buffer,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);

  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
  do {
    const uint16_t* w = weights;

    // First pass to process 5 inputs.
    {
      uint16_t* b = buffer;
      const uint16_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint16_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint16_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint16_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint16_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;

      size_t c = round_up_po2(channels, 4);
      for (; c >= 8; c -= 8) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8;
      }
      if (c != 0) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8;
      }
    }

    // Middle pass to process 5 inputs in each iteration.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      uint16_t* b = buffer;
      const uint16_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint16_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint16_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint16_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint16_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;

      size_t c = round_up_po2(channels, 4);
      for (; c >= 8; c -= 8) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b));

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8;
      }
      if (c != 0) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b));

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8;
      }
    }

    // Last pass to process up to 5 inputs.
    {
      uint16_t* b = buffer;
      const uint16_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint16_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint16_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint16_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint16_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
      }

      size_t c = channels;
      for (; c >= 8; c -= 8) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
        vacc01234567 = vminq_f16(vacc01234567, vmax);

        vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
      }
      if XNN_UNLIKELY(c != 0) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b));

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
        float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
        float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2));
        float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3));
        float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4));
        float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
        vacc01234567 = vminq_f16(vacc01234567, vmax);

        float16x4_t vacc0123 = vget_low_f16(vacc01234567);
        if (c & 4) {
          vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4;
          vacc0123 = vget_high_f16(vacc01234567);
        }
        if (c & 2) {
          vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2;
          vacc0123 = vext_f16(vacc0123, vacc0123, 2);
        }
        if (c & 1) {
          vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1;
        }
      }
    }

    input = (const void**) (const uint16_t**) ((uintptr_t) input + input_stride);
    output = (uint16_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
14,564
41.838235
108
c
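The _acc2 variant above alternates taps between two accumulators (vacc01234567p0 and vacc01234567p1) so consecutive FMAs do not depend on each other, and combines the partial sums once per block. A scalar sketch of the idea (illustrative only, not XNNPACK code):

#include <stddef.h>

// Even-indexed taps accumulate into vacc0, odd-indexed taps into vacc1,
// roughly halving the FMA dependency chain; the partial sums are merged
// at the end ("Add up all accumulators").
static float dot_acc2(const float* vi, const float* vk, size_t n)
{
  float vacc0 = 0.0f;
  float vacc1 = 0.0f;
  size_t i = 0;
  for (; i + 1 < n; i += 2) {
    vacc0 += vi[i] * vk[i];
    vacc1 += vi[i + 1] * vk[i + 1];
  }
  if (i < n) {
    vacc0 += vi[i] * vk[i];  // odd tap count: last tap goes to vacc0
  }
  return vacc0 + vacc1;
}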
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-5f5m5l8c8s4r-minmax-neonfp16arith.c
// Auto-generated file. Do not edit!
//   Template: src/f16-dwconv/multipass-neonfp16arith.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f16_dwconv_minmax_ukernel_5f5m5l8c8s4r__neonfp16arith(
    size_t channels,
    size_t output_width,
    const void** input,
    const void* weights,
    void* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const void* zero,
    size_t kernel_size,
    void* buffer,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 5);

  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
  do {
    const uint16_t* w = weights;

    // First pass to process 5 inputs.
    {
      uint16_t* b = buffer;
      const uint16_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint16_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint16_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint16_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint16_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;

      size_t c = round_up_po2(channels, 4);
      for (; c >= 8; c -= 8) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8;
      }
      if (c != 0) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8;
      }
    }

    // Middle pass to process 5 inputs in each iteration.
    for (size_t ks = kernel_size - 5; ks > 5; ks -= 5) {
      uint16_t* b = buffer;
      const uint16_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint16_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint16_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint16_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint16_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
      }
      input += 5;

      size_t c = round_up_po2(channels, 4);
      for (; c >= 8; c -= 8) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b));

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8;
      }
      if (c != 0) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b));

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8;
      }
    }

    // Last pass to process up to 5 inputs.
    {
      uint16_t* b = buffer;
      const uint16_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint16_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint16_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint16_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint16_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
      }

      size_t c = channels;
      for (; c >= 8; c -= 8) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
        vacc01234567 = vminq_f16(vacc01234567, vmax);

        vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
      }
      if XNN_UNLIKELY(c != 0) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b));

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
        float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
        float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2));
        float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3));
        float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4));
        float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
        vacc01234567 = vminq_f16(vacc01234567, vmax);

        float16x4_t vacc0123 = vget_low_f16(vacc01234567);
        if (c & 4) {
          vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4;
          vacc0123 = vget_high_f16(vacc01234567);
        }
        if (c & 2) {
          vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2;
          vacc0123 = vext_f16(vacc0123, vacc0123, 2);
        }
        if (c & 1) {
          vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1;
        }
      }
    }

    input = (const void**) (const uint16_t**) ((uintptr_t) input + input_stride);
    output = (uint16_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
13,865
41.27439
108
c
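The 5f5m5l naming above encodes the pass split: 5 taps in the first pass, 5 per middle pass, up to 5 in the last. A small sketch of how kernel_size maps onto pass counts, mirroring the generated loop `for (ks = kernel_size - 5; ks > 5; ks -= 5)` (illustrative helper, not XNNPACK code):

#include <assert.h>
#include <stddef.h>

// Returns the number of middle passes a 5f5m5l kernel executes; the
// remaining 1..5 taps are handled by the last pass.
static size_t dwconv_5f5m5l_middle_passes(size_t kernel_size)
{
  assert(kernel_size > 5);  // matches the kernel's own assert
  size_t middle_passes = 0;
  size_t ks = kernel_size - 5;  // taps left after the first pass
  while (ks > 5) {
    middle_passes += 1;
    ks -= 5;  // each middle pass consumes 5 taps; `ks` taps go to the last pass
  }
  return middle_passes;
}
// Examples: kernel_size == 9 (3x3) -> 0 middle passes, last pass takes 4 taps;
//           kernel_size == 25 (5x5) -> 3 middle passes, last pass takes 5 taps.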
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-6f6m7l8c8s4r-minmax-neonfp16arith-acc2.c
// Auto-generated file. Do not edit!
//   Template: src/f16-dwconv/multipass-neonfp16arith.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f16_dwconv_minmax_ukernel_6f6m7l8c8s4r__neonfp16arith_acc2(
    size_t channels,
    size_t output_width,
    const void** input,
    const void* weights,
    void* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const void* zero,
    size_t kernel_size,
    void* buffer,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);
  assert(kernel_size > 6);

  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max));
  do {
    const uint16_t* w = weights;

    // First pass to process 6 inputs.
    {
      uint16_t* b = buffer;
      const uint16_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint16_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint16_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint16_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint16_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint16_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;

      size_t c = round_up_po2(channels, 4);
      for (; c >= 8; c -= 8) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
        const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8;
      }
      if (c != 0) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
        const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8;
      }
    }

    // Middle pass to process 6 inputs in each iteration.
    for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) {
      uint16_t* b = buffer;
      const uint16_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint16_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint16_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint16_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint16_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint16_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
      }
      input += 6;

      size_t c = round_up_po2(channels, 4);
      for (; c >= 8; c -= 8) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b));

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
        const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8;
      }
      if (c != 0) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b));

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
        const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8;
      }
    }

    // Last pass to process up to 7 inputs.
    {
      uint16_t* b = buffer;
      const uint16_t* i0 = input[0];
      assert(i0 != NULL);
      if XNN_UNPREDICTABLE(i0 != zero) {
        i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
      }
      const uint16_t* i1 = input[1];
      assert(i1 != NULL);
      if XNN_UNPREDICTABLE(i1 != zero) {
        i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
      }
      const uint16_t* i2 = input[2];
      assert(i2 != NULL);
      if XNN_UNPREDICTABLE(i2 != zero) {
        i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
      }
      const uint16_t* i3 = input[3];
      assert(i3 != NULL);
      if XNN_UNPREDICTABLE(i3 != zero) {
        i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
      }
      const uint16_t* i4 = input[4];
      assert(i4 != NULL);
      if XNN_UNPREDICTABLE(i4 != zero) {
        i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
      }
      const uint16_t* i5 = input[5];
      assert(i5 != NULL);
      if XNN_UNPREDICTABLE(i5 != zero) {
        i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
      }
      const uint16_t* i6 = input[6];
      assert(i6 != NULL);
      if XNN_UNPREDICTABLE(i6 != zero) {
        i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
      }

      size_t c = channels;
      for (; c >= 8; c -= 8) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
        float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567);

        const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
        float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
        vacc01234567 = vminq_f16(vacc01234567, vmax);

        vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
      }
      if XNN_UNLIKELY(c != 0) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b));

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
        float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
        float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2));
        float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3));
        float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4));
        float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5));
        float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567);

        const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6));
        float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
        vacc01234567 = vminq_f16(vacc01234567, vmax);

        float16x4_t vacc0123 = vget_low_f16(vacc01234567);
        if (c & 4) {
          vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4;
          vacc0123 = vget_high_f16(vacc01234567);
        }
        if (c & 2) {
          vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2;
          vacc0123 = vext_f16(vacc0123, vacc0123, 2);
        }
        if (c & 1) {
          vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1;
        }
      }
    }

    input = (const void**) (const uint16_t**) ((uintptr_t) input + input_stride);
    output = (uint16_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
17,266
43.048469
108
c
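The kernels above finish a row with a branchy remainder store: after clamping, the 1..7 leftover half-precision channels are written as an optional 4-element chunk, then a 2-element chunk, then a single element. A scalar model of that (c & 4) / (c & 2) / (c & 1) sequence, assuming the clamped lanes are passed as raw uint16_t values (illustrative, not XNNPACK code):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Writes the first `c` of 8 lanes to `output` in 4/2/1-element chunks,
// mirroring the vst1_u16 / vst1_lane_u32 / vst1_lane_u16 tail above.
static void store_tail_f16(const uint16_t lanes[8], uint16_t* output, size_t c)
{
  assert(c < 8);
  size_t i = 0;
  if (c & 4) {
    memcpy(output, &lanes[i], 4 * sizeof(uint16_t));
    output += 4;
    i += 4;
  }
  if (c & 2) {
    memcpy(output, &lanes[i], 2 * sizeof(uint16_t));
    output += 2;
    i += 2;
  }
  if (c & 1) {
    *output = lanes[i];
  }
}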
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-6f6m7l8c8s4r-minmax-neonfp16arith.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/multipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/math.h> void xnn_f16_dwconv_minmax_ukernel_6f6m7l8c8s4r__neonfp16arith( size_t channels, size_t output_width, const void** input, const void* weights, void* output, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, size_t kernel_size, void* buffer, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); assert(kernel_size > 6); const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* w = weights; // First pass to process 6 inputs. { uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } input += 6; size_t c = round_up_po2(channels, 4); for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); vst1q_u16(b, 
vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } if (c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } } // Middle pass to process 6 inputs in each iteration. for (size_t ks = kernel_size - 6; ks > 7; ks -= 6) { uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } input += 6; size_t c = round_up_po2(channels, 4); for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = 
vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } if (c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } } // Last pass to process up to 7 inputs. 
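    // Note (inferred from the loop bounds above, not stated by the generator):
    // the first pass consumes 6 rows and each middle iteration 6 more while
    // ks > 7, so between 1 and 7 rows reach this last pass and
    // 6 + 6*m + r == kernel_size always holds. The 7 row pointers below are
    // read unconditionally, so trailing input[] entries are presumably padded
    // with `zero` by the caller when fewer than 7 real rows remain.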
{ uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } size_t c = channels; for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); float16x8_t vk2x01234567 = 
vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1; } } } input = (const void**) (const uint16_t**) ((uintptr_t) input + input_stride); output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
16,567
42.6
108
c
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-8f8m9l8c8s4r-minmax-neonfp16arith-acc2.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/multipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/math.h> void xnn_f16_dwconv_minmax_ukernel_8f8m9l8c8s4r__neonfp16arith_acc2( size_t channels, size_t output_width, const void** input, const void* weights, void* output, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, size_t kernel_size, void* buffer, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); assert(kernel_size > 8); const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* w = weights; // First pass to process 8 inputs. { uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } input += 8; size_t c = round_up_po2(channels, 4); for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, 
vi4x01234567, vk4x01234567);

        const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
        const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567);

        const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
        const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567);

        const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8;
        const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8;
      }

      if (c != 0) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8;
        const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8;
        const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8;
        const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8;
        const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8;
        const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8;
        const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567);

        const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8;
        const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567);

        const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8;
        const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8;
      }
    }

    // Middle pass to process 8 inputs in each iteration.
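    // Note: the `ks > 9` bound (an inference from this file's structure, not a
    // documented contract) leaves between 1 and 9 rows for the last pass, so
    // 8 + 8*m + r == kernel_size. The paired accumulators vacc01234567p0/p1
    // used throughout this variant split the FMA dependency chain in two,
    // which is presumably what the `acc2` suffix refers to.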
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) { uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } input += 8; size_t c = round_up_po2(channels, 4); for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } if (c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t 
vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } } // Last pass to process up to 9 inputs. 
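    // Note: this pass reads 9 row pointers unconditionally (padding with
    // `zero` is assumed when fewer real rows remain), folds p1 into p0,
    // clamps with vmin/vmax, and stores the sub-8 channel tail with 4/2/1-lane
    // stores. A buffer-sizing sketch under that reading (an editor assumption,
    // not the generator's own formula): the passes above always write whole
    // 8-element tiles, so `buffer` appears to need at least
    //   round_up_po2(channels, 8) * sizeof(uint16_t)
    // bytes of scratch.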
{ uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } const uint16_t* i8 = input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != zero) { i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset); } size_t c = channels; for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, 
vacc01234567p1);

        float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
        vacc01234567 = vminq_f16(vacc01234567, vmax);

        vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8;
      }
      if XNN_UNLIKELY(c != 0) {
        float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b));

        const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0));
        float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567);

        const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1));
        float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567);

        const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2));
        float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567);

        const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3));
        float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567);

        const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4));
        float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567);

        const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5));
        float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567);

        const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6));
        float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567);

        const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7));
        float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567);

        const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8));
        float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8;
        vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567);

        // Add up all accumulators to vacc01234567p0
        vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1);

        float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin);
        vacc01234567 = vminq_f16(vacc01234567, vmax);

        float16x4_t vacc0123 = vget_low_f16(vacc01234567);
        if (c & 4) {
          vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4;
          vacc0123 = vget_high_f16(vacc01234567);
        }
        if (c & 2) {
          vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2;
          vacc0123 = vext_f16(vacc0123, vacc0123, 2);
        }
        if (c & 1) {
          vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1;
        }
      }
    }

    input = (const void**) (const uint16_t**) ((uintptr_t) input + input_stride);
    output = (uint16_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
21,340
44.406383
108
c
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-8f8m9l8c8s4r-minmax-neonfp16arith.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/multipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/math.h> void xnn_f16_dwconv_minmax_ukernel_8f8m9l8c8s4r__neonfp16arith( size_t channels, size_t output_width, const void** input, const void* weights, void* output, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, size_t kernel_size, void* buffer, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); assert(kernel_size > 8); const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* w = weights; // First pass to process 8 inputs. { uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } input += 8; size_t c = round_up_po2(channels, 4); for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, 
vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } if (c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } } // Middle pass to process 8 inputs in each iteration. 
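    // Note: same pass structure as the acc2 variant above, but with a single
    // accumulator chain (every tap FMAs into vacc01234567p0), trading the
    // shorter dependency chains of acc2 for one fewer live register; this
    // trade-off framing is an observation, not a claim from the generator.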
for (size_t ks = kernel_size - 8; ks > 9; ks -= 8) { uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } input += 8; size_t c = round_up_po2(channels, 4); for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } if (c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, 
vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567); vst1q_u16(b, vreinterpretq_u16_f16(vacc01234567p0)); b += 8; } } // Last pass to process up to 9 inputs. { uint16_t* b = buffer; const uint16_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } const uint16_t* i8 = input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != zero) { i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset); } size_t c = channels; for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w 
+= 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output = (uint16_t*) output + 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(b)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); float16x8_t vk7x01234567 = 
vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output = (uint16_t*) output + 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output = (uint16_t*) output + 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output = (uint16_t*) output + 1; } } } input = (const void**) (const uint16_t**) ((uintptr_t) input + input_stride); output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
20,641
44.069869
108
c
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-9p16c-minmax-neonfp16arith-acc2.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/unipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> void xnn_f16_dwconv_minmax_ukernel_9p16c__neonfp16arith_acc2( size_t channels, size_t output_width, const void** input, const void* weights, void* output_ptr, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); uint16_t* output = (uint16_t*) output_ptr; const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = (const uint16_t*) input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = (const uint16_t*) input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = (const uint16_t*) input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != (const uint16_t*) zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = (const uint16_t*) input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != (const uint16_t*) zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = (const uint16_t*) input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != (const uint16_t*) zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = (const uint16_t*) input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != (const uint16_t*) zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = (const uint16_t*) input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != (const uint16_t*) zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } const uint16_t* i8 = (const uint16_t*) input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != (const uint16_t*) zero) { i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset); } input = (const void**) ((uintptr_t) input + input_stride); size_t c = channels; const uint16_t* w = (const uint16_t*) weights; for (; c >= 16; c -= 16) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = 
vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); float16x8_t vacc89ABCDEFp1 = vmulq_f16(vi1x89ABCDEF, vk1x89ABCDEF); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); vacc89ABCDEFp1 = vfmaq_f16(vacc89ABCDEFp1, vi3x89ABCDEF, vk3x89ABCDEF); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi4x89ABCDEF, vk4x89ABCDEF); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567); vacc89ABCDEFp1 = vfmaq_f16(vacc89ABCDEFp1, vi5x89ABCDEF, vk5x89ABCDEF); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi6x89ABCDEF, vk6x89ABCDEF); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vi7x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk7x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567); vacc89ABCDEFp1 = vfmaq_f16(vacc89ABCDEFp1, vi7x89ABCDEF, vk7x89ABCDEF); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vi8x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk8x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi8x89ABCDEF, vk8x89ABCDEF); // Add up all accumulators to vacc0123456789ABCDEFp0 vacc01234567p0 = 
vaddq_f16(vacc01234567p0, vacc01234567p1); vacc89ABCDEFp0 = vaddq_f16(vacc89ABCDEFp0, vacc89ABCDEFp1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); float16x8_t vacc89ABCDEF = vmaxq_f16(vacc89ABCDEFp0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output += 8; } for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 8)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 24)); float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 40)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 56)); vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 72)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 88)); vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 104)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 120)); vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 136)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 16)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 32)); float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 48)); 
vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 64)); vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 80)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 96)); vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 112)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 128)); vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 144)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1; } } output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
13,922
49.263538
89
c
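The kernel above is one step of a unipass 9-tap depthwise convolution: per output pixel it loads a per-channel bias, accumulates nine input-row/weight products per channel, clamps to [min, max], and stores fp16 results. The following scalar reference is a minimal sketch of that contract, assuming a single channel-tile group and using float as a stand-in for fp16 arithmetic (so per-operation rounding differs from the NEON fp16 code); dwconv9_ref and its parameters are illustrative names, not XNNPACK API.

#include <stddef.h>

/* Hypothetical scalar model of one output pixel of the 9-tap unipass
 * depthwise convolution computed by the NEON kernels in this file.
 * weights holds one channel-tile group: bias[tile], k0[tile], ..., k8[tile]. */
static void dwconv9_ref(
    size_t tile,             /* channels in this group */
    const float* rows[9],    /* 9 input rows for this output pixel */
    const float* weights,    /* bias followed by 9 taps, each tile-wide */
    float* output,
    float min, float max)
{
  for (size_t c = 0; c < tile; c++) {
    float acc = weights[c];                             /* bias lane */
    for (size_t t = 0; t < 9; t++) {
      acc += rows[t][c] * weights[(t + 1) * tile + c];  /* tap t */
    }
    acc = acc < min ? min : acc;   /* vmaxq_f16(acc, vmin) */
    acc = acc > max ? max : acc;   /* vminq_f16(acc, vmax) */
    output[c] = acc;
  }
}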
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-9p16c-minmax-neonfp16arith.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/unipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> void xnn_f16_dwconv_minmax_ukernel_9p16c__neonfp16arith( size_t channels, size_t output_width, const void** input, const void* weights, void* output_ptr, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); uint16_t* output = (uint16_t*) output_ptr; const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = (const uint16_t*) input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = (const uint16_t*) input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = (const uint16_t*) input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != (const uint16_t*) zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = (const uint16_t*) input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != (const uint16_t*) zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = (const uint16_t*) input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != (const uint16_t*) zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = (const uint16_t*) input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != (const uint16_t*) zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = (const uint16_t*) input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != (const uint16_t*) zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } const uint16_t* i8 = (const uint16_t*) input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != (const uint16_t*) zero) { i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset); } input = (const void**) ((uintptr_t) input + input_stride); size_t c = channels; const uint16_t* w = (const uint16_t*) weights; for (; c >= 16; c -= 16) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = 
vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi1x89ABCDEF, vk1x89ABCDEF); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi3x89ABCDEF, vk3x89ABCDEF); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi4x89ABCDEF, vk4x89ABCDEF); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi5x89ABCDEF, vk5x89ABCDEF); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi6x89ABCDEF, vk6x89ABCDEF); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vi7x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk7x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi7x89ABCDEF, vk7x89ABCDEF); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vi8x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk8x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi8x89ABCDEF, vk8x89ABCDEF); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); 
float16x8_t vacc89ABCDEF = vmaxq_f16(vacc89ABCDEFp0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output += 8; } for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 8)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 24)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 40)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 56)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 72)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 88)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 104)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 120)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 136)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 16)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 32)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 48)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 64)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, 
vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 80)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 96)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 112)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 128)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 144)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1; } } output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
13,508
49.033333
89
c
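A detail worth calling out in the kernel above: the literal offsets 8, 24, 40, ..., 136 in the c >= 8 remainder loop fall directly out of the packed weight layout. Each 16-channel group is stored as bias(16), k0(16), ..., k8(16), so tap t begins 16*(t+1) halfwords past the group base; the bias load has already advanced w by 8, hence the -8. In the c != 0 tail, where w is not advanced, the offsets are the plain 16, 32, ..., 144. The 32-channel kernels that follow obey the same rule with ct = 32 (24, 56, ..., 280 and 32, 64, ..., 288). tap_offset below is a hypothetical helper, not part of the source:

#include <stddef.h>

/* For channel tile ct, tap t of the current group starts ct*(t+1)
 * halfwords past the group base; subtracting the 8 halfwords already
 * consumed by the bias load reproduces the constants in the code above. */
static inline size_t tap_offset(size_t ct, size_t t) {
  return ct * (t + 1) - 8;   /* ct=16: 8, 24, 40, ..., 136 for t = 0..8 */
}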
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-9p32c-minmax-neonfp16arith-acc2.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/unipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> void xnn_f16_dwconv_minmax_ukernel_9p32c__neonfp16arith_acc2( size_t channels, size_t output_width, const void** input, const void* weights, void* output_ptr, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); uint16_t* output = (uint16_t*) output_ptr; const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = (const uint16_t*) input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = (const uint16_t*) input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = (const uint16_t*) input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != (const uint16_t*) zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = (const uint16_t*) input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != (const uint16_t*) zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = (const uint16_t*) input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != (const uint16_t*) zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = (const uint16_t*) input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != (const uint16_t*) zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = (const uint16_t*) input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != (const uint16_t*) zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } const uint16_t* i8 = (const uint16_t*) input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != (const uint16_t*) zero) { i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset); } input = (const void**) ((uintptr_t) input + input_stride); size_t c = channels; const uint16_t* w = (const uint16_t*) weights; for (; c >= 32; c -= 32) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vaccGHIJKLMNp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vaccOPQRSTUVp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0xGHIJKLMN = 
vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi0xGHIJKLMN, vk0xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi0xOPQRSTUV, vk0xOPQRSTUV); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); float16x8_t vacc89ABCDEFp1 = vmulq_f16(vi1x89ABCDEF, vk1x89ABCDEF); float16x8_t vaccGHIJKLMNp1 = vmulq_f16(vi1xGHIJKLMN, vk1xGHIJKLMN); float16x8_t vaccOPQRSTUVp1 = vmulq_f16(vi1xOPQRSTUV, vk1xOPQRSTUV); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi2xGHIJKLMN, vk2xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi2xOPQRSTUV, vk2xOPQRSTUV); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); vacc89ABCDEFp1 = vfmaq_f16(vacc89ABCDEFp1, vi3x89ABCDEF, vk3x89ABCDEF); vaccGHIJKLMNp1 = vfmaq_f16(vaccGHIJKLMNp1, vi3xGHIJKLMN, vk3xGHIJKLMN); vaccOPQRSTUVp1 = vfmaq_f16(vaccOPQRSTUVp1, vi3xOPQRSTUV, vk3xOPQRSTUV); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi4xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi4xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; 
const float16x8_t vk4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk4xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk4xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi4x89ABCDEF, vk4x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi4xGHIJKLMN, vk4xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi4xOPQRSTUV, vk4xOPQRSTUV); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vi5xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vi5xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk5xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk5xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567); vacc89ABCDEFp1 = vfmaq_f16(vacc89ABCDEFp1, vi5x89ABCDEF, vk5x89ABCDEF); vaccGHIJKLMNp1 = vfmaq_f16(vaccGHIJKLMNp1, vi5xGHIJKLMN, vk5xGHIJKLMN); vaccOPQRSTUVp1 = vfmaq_f16(vaccOPQRSTUVp1, vi5xOPQRSTUV, vk5xOPQRSTUV); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vi6xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vi6xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk6xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk6xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi6x89ABCDEF, vk6x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi6xGHIJKLMN, vk6xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi6xOPQRSTUV, vk6xOPQRSTUV); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vi7x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vi7xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vi7xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk7x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk7xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk7xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567); vacc89ABCDEFp1 = vfmaq_f16(vacc89ABCDEFp1, vi7x89ABCDEF, vk7x89ABCDEF); vaccGHIJKLMNp1 = vfmaq_f16(vaccGHIJKLMNp1, vi7xGHIJKLMN, vk7xGHIJKLMN); vaccOPQRSTUVp1 = vfmaq_f16(vaccOPQRSTUVp1, vi7xOPQRSTUV, vk7xOPQRSTUV); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vi8x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vi8xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vi8xOPQRSTUV = 
vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk8x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk8xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk8xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi8x89ABCDEF, vk8x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi8xGHIJKLMN, vk8xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi8xOPQRSTUV, vk8xOPQRSTUV); // Add up all accumulators to vacc0123456789ABCDEFGHIJKLMNOPQRSTUVp0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); vacc89ABCDEFp0 = vaddq_f16(vacc89ABCDEFp0, vacc89ABCDEFp1); vaccGHIJKLMNp0 = vaddq_f16(vaccGHIJKLMNp0, vaccGHIJKLMNp1); vaccOPQRSTUVp0 = vaddq_f16(vaccOPQRSTUVp0, vaccOPQRSTUVp1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); float16x8_t vacc89ABCDEF = vmaxq_f16(vacc89ABCDEFp0, vmin); float16x8_t vaccGHIJKLMN = vmaxq_f16(vaccGHIJKLMNp0, vmin); float16x8_t vaccOPQRSTUV = vmaxq_f16(vaccOPQRSTUVp0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax); vaccGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax); vaccOPQRSTUV = vminq_f16(vaccOPQRSTUV, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output += 8; vst1q_u16(output, vreinterpretq_u16_f16(vaccGHIJKLMN)); output += 8; vst1q_u16(output, vreinterpretq_u16_f16(vaccOPQRSTUV)); output += 8; } for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 24)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 56)); float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 88)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 120)); vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 152)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 184)); vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 216)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 248)); vacc01234567p1 = vfmaq_f16(vacc01234567p1, 
vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 280)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 32)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 64)); float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 96)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 128)); vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 160)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 192)); vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 224)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 256)); vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 288)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1; } } output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
19,079
54.953079
89
c
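The _acc2 suffix on the kernel above means the nine taps are split across two partial accumulators: even taps FMA into the ...p0 registers (seeded by the bias), odd taps into ...p1 (seeded by a plain vmulq_f16), and a single vaddq_f16 folds them before clamping. This roughly halves the serial FMA dependency chain per accumulator register. Below is a minimal scalar sketch of the same schedule, with float standing in for fp16; dwconv9_acc2 is an illustrative name, not XNNPACK API.

static float dwconv9_acc2(const float in[9], const float k[9], float bias) {
  float acc0 = bias;           /* taps 0, 2, 4, 6, 8 land here */
  float acc1 = in[1] * k[1];   /* tap 1 starts the second chain (vmulq_f16) */
  acc0 += in[0] * k[0];
  for (int t = 2; t < 9; t++) {
    if (t & 1) { acc1 += in[t] * k[t]; }
    else       { acc0 += in[t] * k[t]; }
  }
  return acc0 + acc1;          /* the vaddq_f16 fold before clamping */
}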
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-9p32c-minmax-neonfp16arith.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/unipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> void xnn_f16_dwconv_minmax_ukernel_9p32c__neonfp16arith( size_t channels, size_t output_width, const void** input, const void* weights, void* output_ptr, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); uint16_t* output = (uint16_t*) output_ptr; const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = (const uint16_t*) input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = (const uint16_t*) input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = (const uint16_t*) input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != (const uint16_t*) zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = (const uint16_t*) input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != (const uint16_t*) zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = (const uint16_t*) input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != (const uint16_t*) zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = (const uint16_t*) input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != (const uint16_t*) zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = (const uint16_t*) input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != (const uint16_t*) zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } const uint16_t* i8 = (const uint16_t*) input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != (const uint16_t*) zero) { i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset); } input = (const void**) ((uintptr_t) input + input_stride); size_t c = channels; const uint16_t* w = (const uint16_t*) weights; for (; c >= 32; c -= 32) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc89ABCDEFp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vaccGHIJKLMNp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vaccOPQRSTUVp0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0xGHIJKLMN = 
vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk0xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi0x89ABCDEF, vk0x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi0xGHIJKLMN, vk0xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi0xOPQRSTUV, vk0xOPQRSTUV); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk1xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi1x89ABCDEF, vk1x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi1xGHIJKLMN, vk1xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi1xOPQRSTUV, vk1xOPQRSTUV); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk2xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi2x89ABCDEF, vk2x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi2xGHIJKLMN, vk2xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi2xOPQRSTUV, vk2xOPQRSTUV); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi3xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk3xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi3x89ABCDEF, vk3x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi3xGHIJKLMN, vk3xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi3xOPQRSTUV, vk3xOPQRSTUV); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi4xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vi4xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = 
vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk4xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk4xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi4x89ABCDEF, vk4x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi4xGHIJKLMN, vk4xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi4xOPQRSTUV, vk4xOPQRSTUV); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vi5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vi5xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vi5xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk5x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk5xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk5xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi5x89ABCDEF, vk5x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi5xGHIJKLMN, vk5xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi5xOPQRSTUV, vk5xOPQRSTUV); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vi6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vi6xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vi6xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk6x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk6xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk6xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi6x89ABCDEF, vk6x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi6xGHIJKLMN, vk6xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi6xOPQRSTUV, vk6xOPQRSTUV); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vi7x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vi7xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vi7xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk7x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk7xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk7xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi7x89ABCDEF, vk7x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi7xGHIJKLMN, vk7xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi7xOPQRSTUV, vk7xOPQRSTUV); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vi8x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vi8xGHIJKLMN = 
vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vi8xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk8x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk8xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vk8xOPQRSTUV = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); vacc89ABCDEFp0 = vfmaq_f16(vacc89ABCDEFp0, vi8x89ABCDEF, vk8x89ABCDEF); vaccGHIJKLMNp0 = vfmaq_f16(vaccGHIJKLMNp0, vi8xGHIJKLMN, vk8xGHIJKLMN); vaccOPQRSTUVp0 = vfmaq_f16(vaccOPQRSTUVp0, vi8xOPQRSTUV, vk8xOPQRSTUV); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); float16x8_t vacc89ABCDEF = vmaxq_f16(vacc89ABCDEFp0, vmin); float16x8_t vaccGHIJKLMN = vmaxq_f16(vaccGHIJKLMNp0, vmin); float16x8_t vaccOPQRSTUV = vmaxq_f16(vaccOPQRSTUVp0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vacc89ABCDEF = vminq_f16(vacc89ABCDEF, vmax); vaccGHIJKLMN = vminq_f16(vaccGHIJKLMN, vmax); vaccOPQRSTUV = vminq_f16(vaccOPQRSTUV, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; vst1q_u16(output, vreinterpretq_u16_f16(vacc89ABCDEF)); output += 8; vst1q_u16(output, vreinterpretq_u16_f16(vaccGHIJKLMN)); output += 8; vst1q_u16(output, vreinterpretq_u16_f16(vaccOPQRSTUV)); output += 8; } for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 24)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 56)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 88)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 120)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 152)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 184)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 216)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 248)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 280)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, 
vi8x01234567, vk8x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 32)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 64)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 96)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 128)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 160)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 192)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 224)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 256)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w + 288)); vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1; } } output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
18,525
54.801205
89
c
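The pointer setup at the top of each iteration above is driven by an indirection buffer: input holds nine row pointers per output pixel, taps that fall in the padding point at the shared zero row, and input_offset is added only to pointers that are not zero (the XNN_UNPREDICTABLE guards). The sketch below shows how a caller might fill one such entry, assuming precomputed per-tap offsets and in-bounds flags; setup_indirection and its parameters are hypothetical, not the actual XNNPACK indirection-initialization code.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical setup for one output pixel: in-bounds taps get a real row
 * pointer (the kernel later adds input_offset to it), padded taps get the
 * shared zero row, which the kernel deliberately leaves as-is. */
static void setup_indirection(
    const void** entry,       /* 9 slots consumed by the kernel */
    const uint16_t* image,    /* fp16 input, viewed as raw uint16_t */
    const uint16_t* zero,     /* zero-initialized padding row */
    const ptrdiff_t offs[9],  /* element offset of each tap */
    const int inside[9])      /* nonzero if the tap is in-bounds */
{
  for (int t = 0; t < 9; t++) {
    entry[t] = inside[t] ? (const void*) (image + offs[t])
                         : (const void*) zero;
  }
}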
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-9p8c-minmax-neonfp16arith-acc2.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/unipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> void xnn_f16_dwconv_minmax_ukernel_9p8c__neonfp16arith_acc2( size_t channels, size_t output_width, const void** input, const void* weights, void* output_ptr, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); uint16_t* output = (uint16_t*) output_ptr; const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = (const uint16_t*) input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = (const uint16_t*) input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = (const uint16_t*) input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != (const uint16_t*) zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = (const uint16_t*) input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != (const uint16_t*) zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = (const uint16_t*) input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != (const uint16_t*) zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = (const uint16_t*) input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != (const uint16_t*) zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = (const uint16_t*) input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != (const uint16_t*) zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } const uint16_t* i8 = (const uint16_t*) input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != (const uint16_t*) zero) { i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset); } input = (const void**) ((uintptr_t) input + input_stride); size_t c = channels; const uint16_t* w = (const uint16_t*) weights; for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = 
vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; float16x8_t vacc01234567p1 = vmulq_f16(vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p1 = vfmaq_f16(vacc01234567p1, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = 
vreinterpretq_f16_u16(vld1q_u16(i8)); const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); // Add up all accumulators to vacc01234567p0 vacc01234567p0 = vaddq_f16(vacc01234567p0, vacc01234567p1); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1; } } output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
8,735
43.345178
89
c
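The record above ends a unipass depthwise-convolution kernel that alternates its nine multiply-adds between two partial accumulators. A minimal scalar sketch of that pattern, assuming float stands in for the kernel's f16 lanes; dwconv9_acc2_ref is an illustrative name, not an XNNPACK API.

// Scalar reference for the 9-point unipass depthwise-convolution step:
// taps alternate between two partial accumulators (mirroring
// vacc01234567p0/p1), which are only summed at the end, and the result
// is clamped to [vmin, vmax].
static float dwconv9_acc2_ref(float bias, const float k[9], const float x[9],
                              float vmin, float vmax) {
  float p0 = bias;   // even taps accumulate onto the bias
  float p1 = 0.0f;   // odd taps accumulate separately (p1 starts as a mul)
  for (int t = 0; t < 9; ++t) {
    if (t & 1) {
      p1 += x[t] * k[t];
    } else {
      p0 += x[t] * k[t];
    }
  }
  float acc = p0 + p1;               // "Add up all accumulators"
  acc = acc < vmin ? vmin : acc;     // clamp below
  return acc > vmax ? vmax : acc;    // clamp above
}

Splitting the sum this way shortens the FMA dependency chain so consecutive fused multiply-adds can overlap in the pipeline.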
XNNPACK
XNNPACK-master/src/f16-dwconv/gen/f16-dwconv-9p8c-minmax-neonfp16arith.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv/unipass-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> void xnn_f16_dwconv_minmax_ukernel_9p8c__neonfp16arith( size_t channels, size_t output_width, const void** input, const void* weights, void* output_ptr, intptr_t input_stride, size_t output_increment, size_t input_offset, const void* zero, const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(channels != 0); assert(output_width != 0); uint16_t* output = (uint16_t*) output_ptr; const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.min)); const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->fp16arith.max)); do { const uint16_t* i0 = (const uint16_t*) input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != (const uint16_t*) zero) { i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset); } const uint16_t* i1 = (const uint16_t*) input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != (const uint16_t*) zero) { i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset); } const uint16_t* i2 = (const uint16_t*) input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != (const uint16_t*) zero) { i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset); } const uint16_t* i3 = (const uint16_t*) input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != (const uint16_t*) zero) { i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset); } const uint16_t* i4 = (const uint16_t*) input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != (const uint16_t*) zero) { i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset); } const uint16_t* i5 = (const uint16_t*) input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != (const uint16_t*) zero) { i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset); } const uint16_t* i6 = (const uint16_t*) input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != (const uint16_t*) zero) { i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset); } const uint16_t* i7 = (const uint16_t*) input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != (const uint16_t*) zero) { i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset); } const uint16_t* i8 = (const uint16_t*) input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != (const uint16_t*) zero) { i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset); } input = (const void**) ((uintptr_t) input + input_stride); size_t c = channels; const uint16_t* w = (const uint16_t*) weights; for (; c >= 8; c -= 8) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = 
vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); i5 += 8; const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); i6 += 8; const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); i7 += 8; const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); i8 += 8; const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); vst1q_u16(output, vreinterpretq_u16_f16(vacc01234567)); output += 8; } if XNN_UNLIKELY(c != 0) { float16x8_t vacc01234567p0 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; const float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vld1q_u16(i0)); const float16x8_t vk0x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi0x01234567, vk0x01234567); const float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vld1q_u16(i1)); const float16x8_t vk1x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi1x01234567, vk1x01234567); const float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vld1q_u16(i2)); const float16x8_t vk2x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi2x01234567, vk2x01234567); const float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vld1q_u16(i3)); const float16x8_t vk3x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi3x01234567, vk3x01234567); const float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vld1q_u16(i4)); const float16x8_t vk4x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi4x01234567, vk4x01234567); const float16x8_t vi5x01234567 = vreinterpretq_f16_u16(vld1q_u16(i5)); const float16x8_t vk5x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi5x01234567, vk5x01234567); const float16x8_t vi6x01234567 = vreinterpretq_f16_u16(vld1q_u16(i6)); const float16x8_t vk6x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi6x01234567, vk6x01234567); const float16x8_t vi7x01234567 = vreinterpretq_f16_u16(vld1q_u16(i7)); const float16x8_t vk7x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi7x01234567, vk7x01234567); const float16x8_t vi8x01234567 = vreinterpretq_f16_u16(vld1q_u16(i8)); const float16x8_t vk8x01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); w += 8; 
vacc01234567p0 = vfmaq_f16(vacc01234567p0, vi8x01234567, vk8x01234567); float16x8_t vacc01234567 = vmaxq_f16(vacc01234567p0, vmin); vacc01234567 = vminq_f16(vacc01234567, vmax); float16x4_t vacc0123 = vget_low_f16(vacc01234567); if (c & 4) { vst1_u16(output, vreinterpret_u16_f16(vacc0123)); output += 4; vacc0123 = vget_high_f16(vacc01234567); } if (c & 2) { vst1_lane_u32((void*) output, vreinterpret_u32_f16(vacc0123), 0); output += 2; vacc0123 = vext_f16(vacc0123, vacc0123, 2); } if (c & 1) { vst1_lane_u16(output, vreinterpret_u16_f16(vacc0123), 0); output += 1; } } output = (uint16_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
8,504
43.067358
89
c
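The 9p8c kernel above handles channel counts that are not multiples of 8 with the `c & 4` / `c & 2` / `c & 1` store cascade. A scalar sketch of that remainder path, assuming uint16_t payloads as in the kernel; store_tail_ref is an illustrative name, not an XNNPACK API.

#include <stdint.h>
#include <string.h>

// Write the low `c` (1..7) of 8 computed lanes: store 4, then 2, then 1
// elements, shifting the remaining lanes down after each partial store,
// just as the kernel rotates vacc0123 with vget_high_f16/vext_f16.
static void store_tail_ref(uint16_t* out, const uint16_t lanes[8], size_t c) {
  uint16_t v[8];
  memcpy(v, lanes, sizeof(v));
  if (c & 4) {
    memcpy(out, v, 4 * sizeof(uint16_t)); out += 4;
    memmove(v, v + 4, 4 * sizeof(uint16_t));   // like vget_high_f16
  }
  if (c & 2) {
    memcpy(out, v, 2 * sizeof(uint16_t)); out += 2;
    memmove(v, v + 2, 2 * sizeof(uint16_t));   // like vext_f16(..., 2)
  }
  if (c & 1) {
    *out = v[0];                                // like vst1_lane_u16
  }
}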
XNNPACK
XNNPACK-master/src/f16-dwconv2d-chw/gen/f16-dwconv2d-chw-3x3p1-minmax-neonfp16arith-1x8-acc2.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv2d-chw/3x3p1-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_f16_dwconv2d_chw_ukernel_3x3p1__neonfp16arith_1x8_acc2( size_t input_height, size_t input_width, const void* input, const void* weights, const void* zero, void* output, uint32_t padding_top, const union xnn_f16_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(input_height != 0); assert(input_width != 0); assert(input_width % sizeof(uint16_t) == 0); assert(padding_top == 1); #if XNN_ARCH_ARM64 const uint16x8x2_t vminmax = vld2q_dup_u16(&params->neonfp16arith_stride1.min); const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]); const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]); #else // vld2_dup is to work around aarch32 clang bug with vld1q_dup const uint16x4x2_t vminmax = vld2_dup_u16(&params->neonfp16arith_stride1.min); const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0])); const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1])); #endif const uint16x8_t vmask = vld1q_u16(params->neonfp16arith_stride1.mask); const uint16_t* w = (const uint16_t*) weights; const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x4_t vw89 = vreinterpret_f16_u32(vld1_dup_u32((const void*) (w + 8))); const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(uint16_t)); const uint16_t* i0 = zero; const uint16_t* i1 = input; const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); uint16_t* o0 = output; size_t output_height = input_height; do { if XNN_UNPREDICTABLE(output_height < 2) { i2 = zero; } float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; size_t w = input_width; for (; w > 8 * sizeof(uint16_t); w -= 8 * sizeof(uint16_t)) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x89ABCDEF, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2); #endif float16x8_t vo0p1 = vmulq_lane_f16(vi1x89ABCDEF, vget_high_f16(vw01234567), 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #endif // Left column const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7); const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7); const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7); #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi0x789ABCDE, vw01234567, 1); #else vo0p1 = 
vmlaq_lane_f16(vo0p1, vi0x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x789ABCDE, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi2x789ABCDE, vw01234567, 7); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi2x789ABCDE, vget_high_f16(vw01234567), 3); #endif vi0x01234567 = vi0x89ABCDEF; vi1x01234567 = vi1x89ABCDEF; vi2x01234567 = vi2x89ABCDEF; // Right column const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vi0xGHIJKLMN, 1); const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vi1xGHIJKLMN, 1); const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vi2xGHIJKLMN, 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x9ABCDEFG, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi1x9ABCDEFG, vw01234567, 6); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #endif vi0x89ABCDEF = vi0xGHIJKLMN; vi1x89ABCDEF = vi1xGHIJKLMN; vi2x89ABCDEF = vi2xGHIJKLMN; vo0p0 = vaddq_f16(vo0p0, vo0p1); float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } // Always process the last block of 1..8 pixels. assert(w >= 1 * sizeof(uint16_t)); assert(w <= 8 * sizeof(uint16_t)); { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); vi0x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi0x89ABCDEF))); vi1x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi1x89ABCDEF))); vi2x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi2x89ABCDEF))); // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x89ABCDEF, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2); #endif float16x8_t vo0p1 = vmulq_lane_f16(vi1x89ABCDEF, vget_high_f16(vw01234567), 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #endif // Left column const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7); const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7); const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7); #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi0x789ABCDE, vw01234567, 1); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi0x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x789ABCDE, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi2x789ABCDE, vw01234567, 7); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi2x789ABCDE, vget_high_f16(vw01234567), 3); #endif // Right column const float16x8_t vzero = vreinterpretq_f16_u16(vmovq_n_u16(0)); const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vzero, 1); const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vzero, 1); const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vzero, 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x9ABCDEFG, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p1 = 
vfmaq_laneq_f16(vo0p1, vi1x9ABCDEFG, vw01234567, 6); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #endif vo0p0 = vaddq_f16(vo0p0, vo0p1); float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); if XNN_LIKELY(w == 8 * sizeof(uint16_t)) { vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } else { float16x4_t vo0_lo = vget_low_f16(vo0); if (w & (4 * sizeof(uint16_t))) { vst1_u16(o0, vreinterpret_u16_f16(vo0_lo)); o0 += 4; vo0_lo = vget_high_f16(vo0); } if (w & (2 * sizeof(uint16_t))) { vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vo0_lo), 0); o0 += 2; vo0_lo = vext_f16(vo0_lo, vo0_lo, 2); } if (w & (1 * sizeof(uint16_t))) { vst1_lane_u16(o0, vreinterpret_u16_f16(vo0_lo), 0); o0 += 1; } } } i0 = (const uint16_t*) ((uintptr_t) i1 - input_decrement); i1 = (const uint16_t*) ((uintptr_t) i2 - input_decrement); i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); } while (--output_height != 0); }
9,343
36.98374
98
c
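The chw kernel above computes a 3x3, stride-1, padding-1 convolution over one row of 8 pixels at a time. A scalar reference for a single output pixel, under the weight layout visible in the code (lane 0 of vw01234567 is the bias, the nine taps follow); conv3x3p1_ref is an illustrative name, not an XNNPACK API.

#include <stddef.h>

// Scalar reference for one output pixel of the 3x3, stride-1, padding-1
// kernels: wgt[0] is the bias, wgt[1..9] the taps in row-major order
// (left/center/right columns match the vextq_f16-shifted windows), and
// out-of-bounds pixels read as zero (the `zero` row and the masked tail).
static float conv3x3p1_ref(const float* in, size_t h, size_t w_px,
                           size_t y, size_t x, const float wgt[10],
                           float vmin, float vmax) {
  float acc = wgt[0];
  for (size_t r = 0; r < 3; ++r) {
    for (size_t s = 0; s < 3; ++s) {
      const size_t iy = y + r;   // input row, offset by padding of 1
      const size_t ix = x + s;   // input column, offset by padding of 1
      float px = 0.0f;           // zero padding outside the image
      if (iy >= 1 && iy <= h && ix >= 1 && ix <= w_px) {
        px = in[(iy - 1) * w_px + (ix - 1)];
      }
      acc += px * wgt[1 + r * 3 + s];
    }
  }
  acc = acc < vmin ? vmin : acc;
  return acc > vmax ? vmax : acc;
}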
XNNPACK
XNNPACK-master/src/f16-dwconv2d-chw/gen/f16-dwconv2d-chw-3x3p1-minmax-neonfp16arith-1x8-acc3.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv2d-chw/3x3p1-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_f16_dwconv2d_chw_ukernel_3x3p1__neonfp16arith_1x8_acc3( size_t input_height, size_t input_width, const void* input, const void* weights, const void* zero, void* output, uint32_t padding_top, const union xnn_f16_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(input_height != 0); assert(input_width != 0); assert(input_width % sizeof(uint16_t) == 0); assert(padding_top == 1); #if XNN_ARCH_ARM64 const uint16x8x2_t vminmax = vld2q_dup_u16(&params->neonfp16arith_stride1.min); const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]); const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]); #else // vld2_dup is to work around aarch32 clang bug with vld1q_dup const uint16x4x2_t vminmax = vld2_dup_u16(&params->neonfp16arith_stride1.min); const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0])); const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1])); #endif const uint16x8_t vmask = vld1q_u16(params->neonfp16arith_stride1.mask); const uint16_t* w = (const uint16_t*) weights; const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x4_t vw89 = vreinterpret_f16_u32(vld1_dup_u32((const void*) (w + 8))); const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(uint16_t)); const uint16_t* i0 = zero; const uint16_t* i1 = input; const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); uint16_t* o0 = output; size_t output_height = input_height; do { if XNN_UNPREDICTABLE(output_height < 2) { i2 = zero; } float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; size_t w = input_width; for (; w > 8 * sizeof(uint16_t); w -= 8 * sizeof(uint16_t)) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x89ABCDEF, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2); #endif float16x8_t vo0p1 = vmulq_lane_f16(vi1x89ABCDEF, vget_high_f16(vw01234567), 1); float16x8_t vo0p2 = vmulq_lane_f16(vi2x89ABCDEF, vw89, 0); // Left column const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7); const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7); const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x789ABCDE, vw01234567, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if 
XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi1x789ABCDE, vw01234567, 4); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi1x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p2 = vfmaq_laneq_f16(vo0p2, vi2x789ABCDE, vw01234567, 7); #else vo0p2 = vmlaq_lane_f16(vo0p2, vi2x789ABCDE, vget_high_f16(vw01234567), 3); #endif vi0x01234567 = vi0x89ABCDEF; vi1x01234567 = vi1x89ABCDEF; vi2x01234567 = vi2x89ABCDEF; // Right column const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vi0xGHIJKLMN, 1); const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vi1xGHIJKLMN, 1); const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vi2xGHIJKLMN, 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x9ABCDEFG, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi1x9ABCDEFG, vw01234567, 6); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p2 = vfmaq_lane_f16(vo0p2, vi2x9ABCDEFG, vw89, 1); #else vo0p2 = vmlaq_lane_f16(vo0p2, vi2x9ABCDEFG, vw89, 1); #endif vi0x89ABCDEF = vi0xGHIJKLMN; vi1x89ABCDEF = vi1xGHIJKLMN; vi2x89ABCDEF = vi2xGHIJKLMN; vo0p0 = vaddq_f16(vo0p0, vo0p1); vo0p0 = vaddq_f16(vo0p0, vo0p2); float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } // Always process the last block of 1..8 pixels. assert(w >= 1 * sizeof(uint16_t)); assert(w <= 8 * sizeof(uint16_t)); { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); vi0x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi0x89ABCDEF))); vi1x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi1x89ABCDEF))); vi2x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi2x89ABCDEF))); // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x89ABCDEF, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2); #endif float16x8_t vo0p1 = vmulq_lane_f16(vi1x89ABCDEF, vget_high_f16(vw01234567), 1); float16x8_t vo0p2 = vmulq_lane_f16(vi2x89ABCDEF, vw89, 0); // Left column const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7); const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7); const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x789ABCDE, vw01234567, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi1x789ABCDE, vw01234567, 4); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi1x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p2 = vfmaq_laneq_f16(vo0p2, vi2x789ABCDE, vw01234567, 7); #else vo0p2 = vmlaq_lane_f16(vo0p2, vi2x789ABCDE, vget_high_f16(vw01234567), 3); #endif // Right column const float16x8_t vzero = vreinterpretq_f16_u16(vmovq_n_u16(0)); const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vzero, 1); const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vzero, 1); const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vzero, 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x9ABCDEFG, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi1x9ABCDEFG, vw01234567, 6); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi1x9ABCDEFG, vget_high_f16(vw01234567), 
2); #endif #if XNN_ARCH_ARM64 vo0p2 = vfmaq_lane_f16(vo0p2, vi2x9ABCDEFG, vw89, 1); #else vo0p2 = vmlaq_lane_f16(vo0p2, vi2x9ABCDEFG, vw89, 1); #endif vo0p0 = vaddq_f16(vo0p0, vo0p1); vo0p0 = vaddq_f16(vo0p0, vo0p2); float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); if XNN_LIKELY(w == 8 * sizeof(uint16_t)) { vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } else { float16x4_t vo0_lo = vget_low_f16(vo0); if (w & (4 * sizeof(uint16_t))) { vst1_u16(o0, vreinterpret_u16_f16(vo0_lo)); o0 += 4; vo0_lo = vget_high_f16(vo0); } if (w & (2 * sizeof(uint16_t))) { vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vo0_lo), 0); o0 += 2; vo0_lo = vext_f16(vo0_lo, vo0_lo, 2); } if (w & (1 * sizeof(uint16_t))) { vst1_lane_u16(o0, vreinterpret_u16_f16(vo0_lo), 0); o0 += 1; } } } i0 = (const uint16_t*) ((uintptr_t) i1 - input_decrement); i1 = (const uint16_t*) ((uintptr_t) i2 - input_decrement); i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); } while (--output_height != 0); }
9,203
37.35
98
c
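The acc2/acc3/acc4 suffixes in these file names count partial accumulators. A generic scalar sketch of the tradeoff, with dot9_accn_ref as an illustrative name, not an XNNPACK API.

#include <assert.h>

// The accN variants spread the 9 multiply-adds over N partial
// accumulators: a shorter FMA dependency chain per partial, paid for
// with extra registers and a final reduction (the vaddq_f16 chain).
static float dot9_accn_ref(const float x[9], const float k[9], float bias,
                           int n_acc) {
  assert(n_acc >= 1 && n_acc <= 4);
  float p[4] = { bias, 0.0f, 0.0f, 0.0f };  // partial accumulators
  for (int t = 0; t < 9; ++t) {
    p[t % n_acc] += x[t] * k[t];            // round-robin taps over partials
  }
  for (int a = 1; a < n_acc; ++a) {
    p[0] += p[a];                           // reduce partials at the end
  }
  return p[0];
}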
XNNPACK
XNNPACK-master/src/f16-dwconv2d-chw/gen/f16-dwconv2d-chw-3x3p1-minmax-neonfp16arith-1x8-acc4.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv2d-chw/3x3p1-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_f16_dwconv2d_chw_ukernel_3x3p1__neonfp16arith_1x8_acc4( size_t input_height, size_t input_width, const void* input, const void* weights, const void* zero, void* output, uint32_t padding_top, const union xnn_f16_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(input_height != 0); assert(input_width != 0); assert(input_width % sizeof(uint16_t) == 0); assert(padding_top == 1); #if XNN_ARCH_ARM64 const uint16x8x2_t vminmax = vld2q_dup_u16(&params->neonfp16arith_stride1.min); const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]); const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]); #else // vld2_dup is to work around aarch32 clang bug with vld1q_dup const uint16x4x2_t vminmax = vld2_dup_u16(&params->neonfp16arith_stride1.min); const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0])); const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1])); #endif const uint16x8_t vmask = vld1q_u16(params->neonfp16arith_stride1.mask); const uint16_t* w = (const uint16_t*) weights; const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x4_t vw89 = vreinterpret_f16_u32(vld1_dup_u32((const void*) (w + 8))); const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(uint16_t)); const uint16_t* i0 = zero; const uint16_t* i1 = input; const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); uint16_t* o0 = output; size_t output_height = input_height; do { if XNN_UNPREDICTABLE(output_height < 2) { i2 = zero; } float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; size_t w = input_width; for (; w > 8 * sizeof(uint16_t); w -= 8 * sizeof(uint16_t)) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x89ABCDEF, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2); #endif float16x8_t vo0p1 = vmulq_lane_f16(vi1x89ABCDEF, vget_high_f16(vw01234567), 1); float16x8_t vo0p2 = vmulq_lane_f16(vi2x89ABCDEF, vw89, 0); // Left column const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7); const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7); const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7); float16x8_t vo0p3 = vmulq_lane_f16(vi0x789ABCDE, vget_low_f16(vw01234567), 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x789ABCDE, vw01234567, 4); #else vo0p0 = 
vmlaq_lane_f16(vo0p0, vi1x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi2x789ABCDE, vw01234567, 7); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi2x789ABCDE, vget_high_f16(vw01234567), 3); #endif vi0x01234567 = vi0x89ABCDEF; vi1x01234567 = vi1x89ABCDEF; vi2x01234567 = vi2x89ABCDEF; // Right column const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vi0xGHIJKLMN, 1); const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vi1xGHIJKLMN, 1); const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vi2xGHIJKLMN, 1); #if XNN_ARCH_ARM64 vo0p2 = vfmaq_laneq_f16(vo0p2, vi0x9ABCDEFG, vw01234567, 3); #else vo0p2 = vmlaq_lane_f16(vo0p2, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p3 = vfmaq_laneq_f16(vo0p3, vi1x9ABCDEFG, vw01234567, 6); #else vo0p3 = vmlaq_lane_f16(vo0p3, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #endif vi0x89ABCDEF = vi0xGHIJKLMN; vi1x89ABCDEF = vi1xGHIJKLMN; vi2x89ABCDEF = vi2xGHIJKLMN; vo0p0 = vaddq_f16(vo0p0, vo0p1); vo0p2 = vaddq_f16(vo0p2, vo0p3); vo0p0 = vaddq_f16(vo0p0, vo0p2); float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } // Always process the last block of 1..8 pixels. assert(w >= 1 * sizeof(uint16_t)); assert(w <= 8 * sizeof(uint16_t)); { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); vi0x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi0x89ABCDEF))); vi1x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi1x89ABCDEF))); vi2x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi2x89ABCDEF))); // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x89ABCDEF, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2); #endif float16x8_t vo0p1 = vmulq_lane_f16(vi1x89ABCDEF, vget_high_f16(vw01234567), 1); float16x8_t vo0p2 = vmulq_lane_f16(vi2x89ABCDEF, vw89, 0); // Left column const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7); const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7); const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7); float16x8_t vo0p3 = vmulq_lane_f16(vi0x789ABCDE, vget_low_f16(vw01234567), 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x789ABCDE, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi2x789ABCDE, vw01234567, 7); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi2x789ABCDE, vget_high_f16(vw01234567), 3); #endif // Right column const float16x8_t vzero = vreinterpretq_f16_u16(vmovq_n_u16(0)); const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vzero, 1); const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vzero, 1); const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vzero, 1); #if XNN_ARCH_ARM64 vo0p2 = vfmaq_laneq_f16(vo0p2, vi0x9ABCDEFG, vw01234567, 3); #else vo0p2 = vmlaq_lane_f16(vo0p2, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p3 = vfmaq_laneq_f16(vo0p3, vi1x9ABCDEFG, vw01234567, 6); #else vo0p3 = vmlaq_lane_f16(vo0p3, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); 
#endif vo0p0 = vaddq_f16(vo0p0, vo0p1); vo0p2 = vaddq_f16(vo0p2, vo0p3); vo0p0 = vaddq_f16(vo0p0, vo0p2); float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); if XNN_LIKELY(w == 8 * sizeof(uint16_t)) { vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } else { float16x4_t vo0_lo = vget_low_f16(vo0); if (w & (4 * sizeof(uint16_t))) { vst1_u16(o0, vreinterpret_u16_f16(vo0_lo)); o0 += 4; vo0_lo = vget_high_f16(vo0); } if (w & (2 * sizeof(uint16_t))) { vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vo0_lo), 0); o0 += 2; vo0_lo = vext_f16(vo0_lo, vo0_lo, 2); } if (w & (1 * sizeof(uint16_t))) { vst1_lane_u16(o0, vreinterpret_u16_f16(vo0_lo), 0); o0 += 1; } } } i0 = (const uint16_t*) ((uintptr_t) i1 - input_decrement); i1 = (const uint16_t*) ((uintptr_t) i2 - input_decrement); i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); } while (--output_height != 0); }
9,049
37.675214
98
c
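Each of these kernels finishes a row by pushing the final 1..8 pixels through the full 8-wide path after masking. A scalar sketch of that masking step; mask_tail_ref is an illustrative name, not an XNNPACK API.

#include <stddef.h>

// The final 1..8 pixels of each row are processed as a full 8-wide block:
// lanes past the row end are zeroed first (vandq_u16 with the per-lane
// all-ones/all-zeros mask from params), so the arithmetic path is
// unchanged and only the store is narrowed.
static void mask_tail_ref(float v[8], size_t valid /* 1..8 */) {
  for (size_t i = valid; i < 8; ++i) {
    v[i] = 0.0f;
  }
}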
XNNPACK
XNNPACK-master/src/f16-dwconv2d-chw/gen/f16-dwconv2d-chw-3x3p1-minmax-neonfp16arith-1x8.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv2d-chw/3x3p1-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_f16_dwconv2d_chw_ukernel_3x3p1__neonfp16arith_1x8( size_t input_height, size_t input_width, const void* input, const void* weights, const void* zero, void* output, uint32_t padding_top, const union xnn_f16_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(input_height != 0); assert(input_width != 0); assert(input_width % sizeof(uint16_t) == 0); assert(padding_top == 1); #if XNN_ARCH_ARM64 const uint16x8x2_t vminmax = vld2q_dup_u16(&params->neonfp16arith_stride1.min); const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]); const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]); #else // vld2_dup is to work around aarch32 clang bug with vld1q_dup const uint16x4x2_t vminmax = vld2_dup_u16(&params->neonfp16arith_stride1.min); const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0])); const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1])); #endif const uint16x8_t vmask = vld1q_u16(params->neonfp16arith_stride1.mask); const uint16_t* w = (const uint16_t*) weights; const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x4_t vw89 = vreinterpret_f16_u32(vld1_dup_u32((const void*) (w + 8))); const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(uint16_t)); const uint16_t* i0 = zero; const uint16_t* i1 = input; const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); uint16_t* o0 = output; size_t output_height = input_height; do { if XNN_UNPREDICTABLE(output_height < 2) { i2 = zero; } float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; size_t w = input_width; for (; w > 8 * sizeof(uint16_t); w -= 8 * sizeof(uint16_t)) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x89ABCDEF, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x89ABCDEF, vw01234567, 5); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x89ABCDEF, vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #endif // Left column const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7); const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7); const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7); #if 
XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x789ABCDE, vw01234567, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x789ABCDE, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi2x789ABCDE, vw01234567, 7); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x789ABCDE, vget_high_f16(vw01234567), 3); #endif vi0x01234567 = vi0x89ABCDEF; vi1x01234567 = vi1x89ABCDEF; vi2x01234567 = vi2x89ABCDEF; // Right column const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vi0xGHIJKLMN, 1); const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vi1xGHIJKLMN, 1); const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vi2xGHIJKLMN, 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x9ABCDEFG, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x9ABCDEFG, vw01234567, 6); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #endif vi0x89ABCDEF = vi0xGHIJKLMN; vi1x89ABCDEF = vi1xGHIJKLMN; vi2x89ABCDEF = vi2xGHIJKLMN; float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } // Always process the last block of 1..8 pixels. assert(w >= 1 * sizeof(uint16_t)); assert(w <= 8 * sizeof(uint16_t)); { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); vi0x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi0x89ABCDEF))); vi1x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi1x89ABCDEF))); vi2x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi2x89ABCDEF))); // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x89ABCDEF, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x89ABCDEF, vw01234567, 5); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x89ABCDEF, vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #endif // Left column const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7); const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7); const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x789ABCDE, vw01234567, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x789ABCDE, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi2x789ABCDE, vw01234567, 7); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x789ABCDE, vget_high_f16(vw01234567), 3); #endif // Right column const float16x8_t vzero = vreinterpretq_f16_u16(vmovq_n_u16(0)); const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vzero, 1); const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vzero, 1); const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vzero, 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, 
vi0x9ABCDEFG, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x9ABCDEFG, vw01234567, 6); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #endif float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); if XNN_LIKELY(w == 8 * sizeof(uint16_t)) { vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } else { float16x4_t vo0_lo = vget_low_f16(vo0); if (w & (4 * sizeof(uint16_t))) { vst1_u16(o0, vreinterpret_u16_f16(vo0_lo)); o0 += 4; vo0_lo = vget_high_f16(vo0); } if (w & (2 * sizeof(uint16_t))) { vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vo0_lo), 0); o0 += 2; vo0_lo = vext_f16(vo0_lo, vo0_lo, 2); } if (w & (1 * sizeof(uint16_t))) { vst1_lane_u16(o0, vreinterpret_u16_f16(vo0_lo), 0); o0 += 1; } } } i0 = (const uint16_t*) ((uintptr_t) i1 - input_decrement); i1 = (const uint16_t*) ((uintptr_t) i2 - input_decrement); i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); } while (--output_height != 0); }
9,492
36.670635
98
c
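The 1x8 kernel above keeps three 8-pixel blocks per input row in registers and derives the shifted neighbor windows with vextq_f16 rather than overlapping loads. A scalar sketch of that rotation; shifted_windows_ref is an illustrative name, not an XNNPACK API.

// Each loop iteration keeps the previous, current, and next 8-pixel blocks
// of a row; the left- and right-shifted windows come from lane extraction
// instead of overlapping reloads from memory.
static void shifted_windows_ref(const float prev[8], const float cur[8],
                                const float next[8],
                                float left[8], float right[8]) {
  for (int i = 0; i < 8; ++i) {
    left[i]  = (i == 0) ? prev[7] : cur[i - 1];  // vextq_f16(prev, cur, 7)
    right[i] = (i == 7) ? next[0] : cur[i + 1];  // vextq_f16(cur, next, 1)
  }
}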
XNNPACK
XNNPACK-master/src/f16-dwconv2d-chw/gen/f16-dwconv2d-chw-3x3p1-minmax-neonfp16arith-2x8-acc2.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv2d-chw/3x3p1-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_f16_dwconv2d_chw_ukernel_3x3p1__neonfp16arith_2x8_acc2( size_t input_height, size_t input_width, const void* input, const void* weights, const void* zero, void* output, uint32_t padding_top, const union xnn_f16_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(input_height != 0); assert(input_width != 0); assert(input_width % sizeof(uint16_t) == 0); assert(padding_top == 1); #if XNN_ARCH_ARM64 const uint16x8x2_t vminmax = vld2q_dup_u16(&params->neonfp16arith_stride1.min); const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]); const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]); #else // vld2_dup is to work around aarch32 clang bug with vld1q_dup const uint16x4x2_t vminmax = vld2_dup_u16(&params->neonfp16arith_stride1.min); const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0])); const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1])); #endif const uint16x8_t vmask = vld1q_u16(params->neonfp16arith_stride1.mask); const uint16_t* w = (const uint16_t*) weights; const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x4_t vw89 = vreinterpret_f16_u32(vld1_dup_u32((const void*) (w + 8))); const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(uint16_t)); const uint16_t* i0 = zero; const uint16_t* i1 = input; const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_width); uint16_t* o0 = output; uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + input_width); size_t output_height = input_height; do { if XNN_UNPREDICTABLE(output_height < 2) { i2 = zero; o1 = o0; } if XNN_UNPREDICTABLE(output_height < 3) { i3 = zero; } float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; size_t w = input_width; for (; w > 8 * sizeof(uint16_t); w -= 8 * sizeof(uint16_t)) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); float16x8_t vo1p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x89ABCDEF, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x89ABCDEF, vw01234567, 
2); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x89ABCDEF, vget_low_f16(vw01234567), 2); #endif float16x8_t vo0p1 = vmulq_lane_f16(vi1x89ABCDEF, vget_high_f16(vw01234567), 1); float16x8_t vo1p1 = vmulq_lane_f16(vi2x89ABCDEF, vget_high_f16(vw01234567), 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_lane_f16(vo1p0, vi3x89ABCDEF, vw89, 0); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x89ABCDEF, vw89, 0); #endif // Left column const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7); const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7); const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7); const float16x8_t vi3x789ABCDE = vextq_f16(vi3x01234567, vi3x89ABCDEF, 7); #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi0x789ABCDE, vw01234567, 1); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi0x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo1p1 = vfmaq_laneq_f16(vo1p1, vi1x789ABCDE, vw01234567, 1); #else vo1p1 = vmlaq_lane_f16(vo1p1, vi1x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x789ABCDE, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi2x789ABCDE, vw01234567, 4); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi2x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi2x789ABCDE, vw01234567, 7); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi2x789ABCDE, vget_high_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo1p1 = vfmaq_laneq_f16(vo1p1, vi3x789ABCDE, vw01234567, 7); #else vo1p1 = vmlaq_lane_f16(vo1p1, vi3x789ABCDE, vget_high_f16(vw01234567), 3); #endif vi0x01234567 = vi0x89ABCDEF; vi1x01234567 = vi1x89ABCDEF; vi2x01234567 = vi2x89ABCDEF; vi3x01234567 = vi3x89ABCDEF; // Right column const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vi0xGHIJKLMN, 1); const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vi1xGHIJKLMN, 1); const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vi2xGHIJKLMN, 1); const float16x8_t vi3x9ABCDEFG = vextq_f16(vi3x89ABCDEF, vi3xGHIJKLMN, 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x9ABCDEFG, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x9ABCDEFG, vw01234567, 3); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi1x9ABCDEFG, vw01234567, 6); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo1p1 = vfmaq_laneq_f16(vo1p1, vi2x9ABCDEFG, vw01234567, 6); #else vo1p1 = vmlaq_lane_f16(vo1p1, vi2x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_lane_f16(vo1p0, vi3x9ABCDEFG, vw89, 1); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x9ABCDEFG, vw89, 1); #endif vi0x89ABCDEF = vi0xGHIJKLMN; vi1x89ABCDEF = vi1xGHIJKLMN; vi2x89ABCDEF = vi2xGHIJKLMN; vi3x89ABCDEF = vi3xGHIJKLMN; vo0p0 = vaddq_f16(vo0p0, vo0p1); vo1p0 = vaddq_f16(vo1p0, vo1p1); float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); float16x8_t vo1 = vmaxq_f16(vo1p0, vmin); vo0 = vminq_f16(vo0, vmax); vo1 = vminq_f16(vo1, 
vmax); vst1q_u16(o1, vreinterpretq_u16_f16(vo1)); o1 += 8; vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } // Always process the last block of 1..8 pixels. assert(w >= 1 * sizeof(uint16_t)); assert(w <= 8 * sizeof(uint16_t)); { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); float16x8_t vo1p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); vi0x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi0x89ABCDEF))); vi1x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi1x89ABCDEF))); vi2x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi2x89ABCDEF))); vi3x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi3x89ABCDEF))); // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x89ABCDEF, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x89ABCDEF, vw01234567, 2); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x89ABCDEF, vget_low_f16(vw01234567), 2); #endif float16x8_t vo0p1 = vmulq_lane_f16(vi1x89ABCDEF, vget_high_f16(vw01234567), 1); float16x8_t vo1p1 = vmulq_lane_f16(vi2x89ABCDEF, vget_high_f16(vw01234567), 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_lane_f16(vo1p0, vi3x89ABCDEF, vw89, 0); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x89ABCDEF, vw89, 0); #endif // Left column const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7); const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7); const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7); const float16x8_t vi3x789ABCDE = vextq_f16(vi3x01234567, vi3x89ABCDEF, 7); #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi0x789ABCDE, vw01234567, 1); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi0x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo1p1 = vfmaq_laneq_f16(vo1p1, vi1x789ABCDE, vw01234567, 1); #else vo1p1 = vmlaq_lane_f16(vo1p1, vi1x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x789ABCDE, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi2x789ABCDE, vw01234567, 4); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi2x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi2x789ABCDE, vw01234567, 7); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi2x789ABCDE, vget_high_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo1p1 = vfmaq_laneq_f16(vo1p1, vi3x789ABCDE, vw01234567, 7); #else vo1p1 = vmlaq_lane_f16(vo1p1, vi3x789ABCDE, vget_high_f16(vw01234567), 3); #endif // Right column const float16x8_t vzero = vreinterpretq_f16_u16(vmovq_n_u16(0)); const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vzero, 1); const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vzero, 1); const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vzero, 1); const float16x8_t vi3x9ABCDEFG = vextq_f16(vi3x89ABCDEF, vzero, 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x9ABCDEFG, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x9ABCDEFG, vw01234567, 3); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 
vo0p1 = vfmaq_laneq_f16(vo0p1, vi1x9ABCDEFG, vw01234567, 6); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo1p1 = vfmaq_laneq_f16(vo1p1, vi2x9ABCDEFG, vw01234567, 6); #else vo1p1 = vmlaq_lane_f16(vo1p1, vi2x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_lane_f16(vo1p0, vi3x9ABCDEFG, vw89, 1); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x9ABCDEFG, vw89, 1); #endif vo0p0 = vaddq_f16(vo0p0, vo0p1); vo1p0 = vaddq_f16(vo1p0, vo1p1); float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); float16x8_t vo1 = vmaxq_f16(vo1p0, vmin); vo0 = vminq_f16(vo0, vmax); vo1 = vminq_f16(vo1, vmax); if XNN_LIKELY(w == 8 * sizeof(uint16_t)) { vst1q_u16(o1, vreinterpretq_u16_f16(vo1)); o1 += 8; vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } else { float16x4_t vo1_lo = vget_low_f16(vo1); float16x4_t vo0_lo = vget_low_f16(vo0); if (w & (4 * sizeof(uint16_t))) { vst1_u16(o1, vreinterpret_u16_f16(vo1_lo)); o1 += 4; vst1_u16(o0, vreinterpret_u16_f16(vo0_lo)); o0 += 4; vo1_lo = vget_high_f16(vo1); vo0_lo = vget_high_f16(vo0); } if (w & (2 * sizeof(uint16_t))) { vst1_lane_u32((void*) o1, vreinterpret_u32_f16(vo1_lo), 0); o1 += 2; vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vo0_lo), 0); o0 += 2; vo0_lo = vext_f16(vo0_lo, vo0_lo, 2); vo1_lo = vext_f16(vo1_lo, vo1_lo, 2); } if (w & (1 * sizeof(uint16_t))) { vst1_lane_u16(o1, vreinterpret_u16_f16(vo1_lo), 0); o1 += 1; vst1_lane_u16(o0, vreinterpret_u16_f16(vo0_lo), 0); o0 += 1; } } } i0 = (const uint16_t*) ((uintptr_t) i2 - input_decrement); i1 = (const uint16_t*) ((uintptr_t) i3 - input_decrement); i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); i3 = (const uint16_t*) ((uintptr_t) i2 + input_width); o0 = o1; o1 = (uint16_t*) ((uintptr_t) o0 + input_width); output_height = doz(output_height, 2); } while (output_height != 0); }
14,582
39.063187
98
c
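The 2x8 variants above produce two output rows per pass so that the middle input rows are loaded once and reused. A scalar sketch of that row blocking, assuming the caller has handled padding and bounds; conv3x3_two_rows_ref is an illustrative name, not an XNNPACK API, and clamping is omitted for brevity.

#include <stddef.h>

// A 3x3 kernel reads input rows y-1..y+1, so producing two output rows
// per pass loads four input rows instead of six: i1 and i2 feed both
// outputs, which is why the kernel advances i0..i3 by two rows at a time.
static void conv3x3_two_rows_ref(const float* i0, const float* i1,
                                 const float* i2, const float* i3,
                                 const float k[9], float bias,
                                 float* o0, float* o1, size_t x) {
  const float* rows0[3] = { i0, i1, i2 };  // input rows for output row y
  const float* rows1[3] = { i1, i2, i3 };  // i1/i2 are reused for row y+1
  float a0 = bias;
  float a1 = bias;
  for (int r = 0; r < 3; ++r) {
    for (int s = 0; s < 3; ++s) {
      a0 += rows0[r][x + s] * k[r * 3 + s];
      a1 += rows1[r][x + s] * k[r * 3 + s];
    }
  }
  o0[x] = a0;
  o1[x] = a1;
}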
XNNPACK
XNNPACK-master/src/f16-dwconv2d-chw/gen/f16-dwconv2d-chw-3x3p1-minmax-neonfp16arith-2x8.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv2d-chw/3x3p1-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_f16_dwconv2d_chw_ukernel_3x3p1__neonfp16arith_2x8( size_t input_height, size_t input_width, const void* input, const void* weights, const void* zero, void* output, uint32_t padding_top, const union xnn_f16_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(input_height != 0); assert(input_width != 0); assert(input_width % sizeof(uint16_t) == 0); assert(padding_top == 1); #if XNN_ARCH_ARM64 const uint16x8x2_t vminmax = vld2q_dup_u16(&params->neonfp16arith_stride1.min); const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]); const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]); #else // vld2_dup is to work around aarch32 clang bug with vld1q_dup const uint16x4x2_t vminmax = vld2_dup_u16(&params->neonfp16arith_stride1.min); const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0])); const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1])); #endif const uint16x8_t vmask = vld1q_u16(params->neonfp16arith_stride1.mask); const uint16_t* w = (const uint16_t*) weights; const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x4_t vw89 = vreinterpret_f16_u32(vld1_dup_u32((const void*) (w + 8))); const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(uint16_t)); const uint16_t* i0 = zero; const uint16_t* i1 = input; const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_width); uint16_t* o0 = output; uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + input_width); size_t output_height = input_height; do { if XNN_UNPREDICTABLE(output_height < 2) { i2 = zero; o1 = o0; } if XNN_UNPREDICTABLE(output_height < 3) { i3 = zero; } float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; size_t w = input_width; for (; w > 8 * sizeof(uint16_t); w -= 8 * sizeof(uint16_t)) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); float16x8_t vo1p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x89ABCDEF, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x89ABCDEF, vw01234567, 2); 
#else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x89ABCDEF, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x89ABCDEF, vw01234567, 5); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x89ABCDEF, vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi2x89ABCDEF, vw01234567, 5); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi2x89ABCDEF, vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_lane_f16(vo1p0, vi3x89ABCDEF, vw89, 0); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x89ABCDEF, vw89, 0); #endif // Left column const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7); const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7); const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7); const float16x8_t vi3x789ABCDE = vextq_f16(vi3x01234567, vi3x89ABCDEF, 7); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x789ABCDE, vw01234567, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x789ABCDE, vw01234567, 1); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x789ABCDE, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi2x789ABCDE, vw01234567, 4); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi2x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi2x789ABCDE, vw01234567, 7); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x789ABCDE, vget_high_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi3x789ABCDE, vw01234567, 7); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x789ABCDE, vget_high_f16(vw01234567), 3); #endif vi0x01234567 = vi0x89ABCDEF; vi1x01234567 = vi1x89ABCDEF; vi2x01234567 = vi2x89ABCDEF; vi3x01234567 = vi3x89ABCDEF; // Right column const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vi0xGHIJKLMN, 1); const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vi1xGHIJKLMN, 1); const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vi2xGHIJKLMN, 1); const float16x8_t vi3x9ABCDEFG = vextq_f16(vi3x89ABCDEF, vi3xGHIJKLMN, 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x9ABCDEFG, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x9ABCDEFG, vw01234567, 3); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x9ABCDEFG, vw01234567, 6); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi2x9ABCDEFG, vw01234567, 6); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi2x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_lane_f16(vo1p0, vi3x9ABCDEFG, vw89, 1); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x9ABCDEFG, vw89, 1); #endif vi0x89ABCDEF = vi0xGHIJKLMN; vi1x89ABCDEF = vi1xGHIJKLMN; vi2x89ABCDEF = vi2xGHIJKLMN; vi3x89ABCDEF = vi3xGHIJKLMN; float16x8_t vo0 = 
vmaxq_f16(vo0p0, vmin); float16x8_t vo1 = vmaxq_f16(vo1p0, vmin); vo0 = vminq_f16(vo0, vmax); vo1 = vminq_f16(vo1, vmax); vst1q_u16(o1, vreinterpretq_u16_f16(vo1)); o1 += 8; vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } // Always process the last block of 1..8 pixels. assert(w >= 1 * sizeof(uint16_t)); assert(w <= 8 * sizeof(uint16_t)); { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); float16x8_t vo1p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); vi0x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi0x89ABCDEF))); vi1x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi1x89ABCDEF))); vi2x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi2x89ABCDEF))); vi3x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi3x89ABCDEF))); // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x89ABCDEF, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x89ABCDEF, vw01234567, 2); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x89ABCDEF, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x89ABCDEF, vw01234567, 5); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x89ABCDEF, vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi2x89ABCDEF, vw01234567, 5); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi2x89ABCDEF, vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_lane_f16(vo1p0, vi3x89ABCDEF, vw89, 0); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x89ABCDEF, vw89, 0); #endif // Left column const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7); const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7); const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7); const float16x8_t vi3x789ABCDE = vextq_f16(vi3x01234567, vi3x89ABCDEF, 7); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x789ABCDE, vw01234567, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x789ABCDE, vw01234567, 1); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x789ABCDE, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi2x789ABCDE, vw01234567, 4); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi2x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi2x789ABCDE, vw01234567, 7); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x789ABCDE, vget_high_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi3x789ABCDE, vw01234567, 7); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x789ABCDE, vget_high_f16(vw01234567), 3); #endif // Right column const float16x8_t vzero = vreinterpretq_f16_u16(vmovq_n_u16(0)); const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vzero, 1); const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vzero, 1); const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vzero, 1); const float16x8_t vi3x9ABCDEFG = vextq_f16(vi3x89ABCDEF, vzero, 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, 
vi0x9ABCDEFG, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x9ABCDEFG, vw01234567, 3); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x9ABCDEFG, vw01234567, 6); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi2x9ABCDEFG, vw01234567, 6); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi2x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_lane_f16(vo1p0, vi3x9ABCDEFG, vw89, 1); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x9ABCDEFG, vw89, 1); #endif float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); float16x8_t vo1 = vmaxq_f16(vo1p0, vmin); vo0 = vminq_f16(vo0, vmax); vo1 = vminq_f16(vo1, vmax); if XNN_LIKELY(w == 8 * sizeof(uint16_t)) { vst1q_u16(o1, vreinterpretq_u16_f16(vo1)); o1 += 8; vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } else { float16x4_t vo1_lo = vget_low_f16(vo1); float16x4_t vo0_lo = vget_low_f16(vo0); if (w & (4 * sizeof(uint16_t))) { vst1_u16(o1, vreinterpret_u16_f16(vo1_lo)); o1 += 4; vst1_u16(o0, vreinterpret_u16_f16(vo0_lo)); o0 += 4; vo1_lo = vget_high_f16(vo1); vo0_lo = vget_high_f16(vo0); } if (w & (2 * sizeof(uint16_t))) { vst1_lane_u32((void*) o1, vreinterpret_u32_f16(vo1_lo), 0); o1 += 2; vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vo0_lo), 0); o0 += 2; vo0_lo = vext_f16(vo0_lo, vo0_lo, 2); vo1_lo = vext_f16(vo1_lo, vo1_lo, 2); } if (w & (1 * sizeof(uint16_t))) { vst1_lane_u16(o1, vreinterpret_u16_f16(vo1_lo), 0); o1 += 1; vst1_lane_u16(o0, vreinterpret_u16_f16(vo0_lo), 0); o0 += 1; } } } i0 = (const uint16_t*) ((uintptr_t) i2 - input_decrement); i1 = (const uint16_t*) ((uintptr_t) i3 - input_decrement); i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); i3 = (const uint16_t*) ((uintptr_t) i2 + input_width); o0 = o1; o1 = (uint16_t*) ((uintptr_t) o0 + input_width); output_height = doz(output_height, 2); } while (output_height != 0); }
14,885
38.590426
98
c
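For orientation, a minimal scalar sketch of what this 3x3p1 kernel family computes per channel: a 3x3 depthwise convolution with one pixel of implicit zero padding, the bias in weights[0], the nine taps in weights[1..9] (the lanes of vw01234567 and vw89 above), and a final min/max clamp. It uses float for readability where the kernels store IEEE fp16 as uint16_t; the function name and layout here are illustrative assumptions, not XNNPACK API.

#include <stddef.h>

// Hedged reference: single-channel 3x3 depthwise conv, padding 1, min/max clamp.
static void dwconv2d_3x3p1_reference(
    size_t height, size_t width,
    const float* input,    // height x width pixels
    const float* weights,  // weights[0] = bias, weights[1..9] = 3x3 taps, row-major
    float* output,         // height x width pixels
    float output_min, float output_max)
{
  for (size_t oy = 0; oy < height; oy++) {
    for (size_t ox = 0; ox < width; ox++) {
      float vacc = weights[0];  // bias, like vdupq_lane_f16(..., 0) above
      for (size_t ky = 0; ky < 3; ky++) {
        for (size_t kx = 0; kx < 3; kx++) {
          const ptrdiff_t iy = (ptrdiff_t) (oy + ky) - 1;  // padding_top == 1
          const ptrdiff_t ix = (ptrdiff_t) (ox + kx) - 1;  // implicit left/right padding
          if (iy >= 0 && iy < (ptrdiff_t) height && ix >= 0 && ix < (ptrdiff_t) width) {
            vacc += input[(size_t) iy * width + (size_t) ix] * weights[1 + ky * 3 + kx];
          }
        }
      }
      if (vacc < output_min) { vacc = output_min; }  // vmaxq_f16(..., vmin)
      if (vacc > output_max) { vacc = output_max; }  // vminq_f16(..., vmax)
      output[oy * width + ox] = vacc;
    }
  }
}

The 2x8 variant above vectorizes this over 8 consecutive output columns and 2 output rows per iteration, reusing each loaded input row for every output row that touches it.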
XNNPACK
XNNPACK-master/src/f16-dwconv2d-chw/gen/f16-dwconv2d-chw-3x3p1-minmax-neonfp16arith-3x8.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv2d-chw/3x3p1-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_f16_dwconv2d_chw_ukernel_3x3p1__neonfp16arith_3x8( size_t input_height, size_t input_width, const void* input, const void* weights, const void* zero, void* output, uint32_t padding_top, const union xnn_f16_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(input_height != 0); assert(input_width != 0); assert(input_width % sizeof(uint16_t) == 0); assert(padding_top == 1); #if XNN_ARCH_ARM64 const uint16x8x2_t vminmax = vld2q_dup_u16(&params->neonfp16arith_stride1.min); const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]); const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]); #else // vld2_dup is to work around aarch32 clang bug with vld1q_dup const uint16x4x2_t vminmax = vld2_dup_u16(&params->neonfp16arith_stride1.min); const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0])); const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1])); #endif const uint16x8_t vmask = vld1q_u16(params->neonfp16arith_stride1.mask); const uint16_t* w = (const uint16_t*) weights; const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x4_t vw89 = vreinterpret_f16_u32(vld1_dup_u32((const void*) (w + 8))); const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(uint16_t)); const uint16_t* i0 = zero; const uint16_t* i1 = input; const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_width); const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_width); uint16_t* o0 = output; uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + input_width); uint16_t* o2 = (uint16_t*) ((uintptr_t) o1 + input_width); size_t output_height = input_height; do { if XNN_UNPREDICTABLE(output_height < 2) { i2 = zero; o1 = o0; } if XNN_UNPREDICTABLE(output_height < 3) { i3 = zero; o2 = o1; } if XNN_UNPREDICTABLE(output_height < 4) { i4 = zero; } float16x8_t vi0x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi1x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi2x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi3x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi4x01234567 = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi0x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; float16x8_t vi1x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; float16x8_t vi2x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; float16x8_t vi3x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; float16x8_t vi4x89ABCDEF = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; size_t w = input_width; for (; w > 8 * sizeof(uint16_t); w -= 8 * sizeof(uint16_t)) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); float16x8_t vo1p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); float16x8_t vo2p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const float16x8_t vi0xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i0)); i0 += 8; const float16x8_t vi1xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i1)); i1 += 8; const float16x8_t 
vi2xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i2)); i2 += 8; const float16x8_t vi3xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i3)); i3 += 8; const float16x8_t vi4xGHIJKLMN = vreinterpretq_f16_u16(vld1q_u16(i4)); i4 += 8; // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x89ABCDEF, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x89ABCDEF, vw01234567, 2); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x89ABCDEF, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_laneq_f16(vo2p0, vi2x89ABCDEF, vw01234567, 2); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi2x89ABCDEF, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x89ABCDEF, vw01234567, 5); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x89ABCDEF, vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi2x89ABCDEF, vw01234567, 5); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi2x89ABCDEF, vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_laneq_f16(vo2p0, vi3x89ABCDEF, vw01234567, 5); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi3x89ABCDEF, vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_lane_f16(vo1p0, vi3x89ABCDEF, vw89, 0); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x89ABCDEF, vw89, 0); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_lane_f16(vo2p0, vi4x89ABCDEF, vw89, 0); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi4x89ABCDEF, vw89, 0); #endif // Left column const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7); const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7); const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7); const float16x8_t vi3x789ABCDE = vextq_f16(vi3x01234567, vi3x89ABCDEF, 7); const float16x8_t vi4x789ABCDE = vextq_f16(vi4x01234567, vi4x89ABCDEF, 7); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x789ABCDE, vw01234567, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x789ABCDE, vw01234567, 1); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_laneq_f16(vo2p0, vi2x789ABCDE, vw01234567, 1); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi2x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x789ABCDE, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi2x789ABCDE, vw01234567, 4); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi2x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_laneq_f16(vo2p0, vi3x789ABCDE, vw01234567, 4); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi3x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi2x789ABCDE, vw01234567, 7); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x789ABCDE, vget_high_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi3x789ABCDE, vw01234567, 7); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x789ABCDE, vget_high_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_laneq_f16(vo2p0, vi4x789ABCDE, vw01234567, 7); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi4x789ABCDE, vget_high_f16(vw01234567), 
3); #endif vi0x01234567 = vi0x89ABCDEF; vi1x01234567 = vi1x89ABCDEF; vi2x01234567 = vi2x89ABCDEF; vi3x01234567 = vi3x89ABCDEF; vi4x01234567 = vi4x89ABCDEF; // Right column const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vi0xGHIJKLMN, 1); const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vi1xGHIJKLMN, 1); const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vi2xGHIJKLMN, 1); const float16x8_t vi3x9ABCDEFG = vextq_f16(vi3x89ABCDEF, vi3xGHIJKLMN, 1); const float16x8_t vi4x9ABCDEFG = vextq_f16(vi4x89ABCDEF, vi4xGHIJKLMN, 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x9ABCDEFG, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x9ABCDEFG, vw01234567, 3); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_laneq_f16(vo2p0, vi2x9ABCDEFG, vw01234567, 3); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi2x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x9ABCDEFG, vw01234567, 6); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi2x9ABCDEFG, vw01234567, 6); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi2x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_laneq_f16(vo2p0, vi3x9ABCDEFG, vw01234567, 6); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi3x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_lane_f16(vo1p0, vi3x9ABCDEFG, vw89, 1); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x9ABCDEFG, vw89, 1); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_lane_f16(vo2p0, vi4x9ABCDEFG, vw89, 1); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi4x9ABCDEFG, vw89, 1); #endif vi0x89ABCDEF = vi0xGHIJKLMN; vi1x89ABCDEF = vi1xGHIJKLMN; vi2x89ABCDEF = vi2xGHIJKLMN; vi3x89ABCDEF = vi3xGHIJKLMN; vi4x89ABCDEF = vi4xGHIJKLMN; float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); float16x8_t vo1 = vmaxq_f16(vo1p0, vmin); float16x8_t vo2 = vmaxq_f16(vo2p0, vmin); vo0 = vminq_f16(vo0, vmax); vo1 = vminq_f16(vo1, vmax); vo2 = vminq_f16(vo2, vmax); vst1q_u16(o2, vreinterpretq_u16_f16(vo2)); o2 += 8; vst1q_u16(o1, vreinterpretq_u16_f16(vo1)); o1 += 8; vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } // Always process the last block of 1..8 pixels. 
assert(w >= 1 * sizeof(uint16_t)); assert(w <= 8 * sizeof(uint16_t)); { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); float16x8_t vo1p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); float16x8_t vo2p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); vi0x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi0x89ABCDEF))); vi1x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi1x89ABCDEF))); vi2x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi2x89ABCDEF))); vi3x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi3x89ABCDEF))); vi4x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi4x89ABCDEF))); // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x89ABCDEF, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x89ABCDEF, vw01234567, 2); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x89ABCDEF, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_laneq_f16(vo2p0, vi2x89ABCDEF, vw01234567, 2); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi2x89ABCDEF, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x89ABCDEF, vw01234567, 5); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x89ABCDEF, vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi2x89ABCDEF, vw01234567, 5); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi2x89ABCDEF, vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_laneq_f16(vo2p0, vi3x89ABCDEF, vw01234567, 5); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi3x89ABCDEF, vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_lane_f16(vo1p0, vi3x89ABCDEF, vw89, 0); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x89ABCDEF, vw89, 0); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_lane_f16(vo2p0, vi4x89ABCDEF, vw89, 0); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi4x89ABCDEF, vw89, 0); #endif // Left column const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7); const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7); const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7); const float16x8_t vi3x789ABCDE = vextq_f16(vi3x01234567, vi3x89ABCDEF, 7); const float16x8_t vi4x789ABCDE = vextq_f16(vi4x01234567, vi4x89ABCDEF, 7); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x789ABCDE, vw01234567, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x789ABCDE, vw01234567, 1); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_laneq_f16(vo2p0, vi2x789ABCDE, vw01234567, 1); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi2x789ABCDE, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x789ABCDE, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi2x789ABCDE, vw01234567, 4); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi2x789ABCDE, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_laneq_f16(vo2p0, vi3x789ABCDE, vw01234567, 4); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi3x789ABCDE, 
vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi2x789ABCDE, vw01234567, 7); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x789ABCDE, vget_high_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi3x789ABCDE, vw01234567, 7); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x789ABCDE, vget_high_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_laneq_f16(vo2p0, vi4x789ABCDE, vw01234567, 7); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi4x789ABCDE, vget_high_f16(vw01234567), 3); #endif // Right column const float16x8_t vzero = vreinterpretq_f16_u16(vmovq_n_u16(0)); const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vzero, 1); const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vzero, 1); const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vzero, 1); const float16x8_t vi3x9ABCDEFG = vextq_f16(vi3x89ABCDEF, vzero, 1); const float16x8_t vi4x9ABCDEFG = vextq_f16(vi4x89ABCDEF, vzero, 1); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0x9ABCDEFG, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi1x9ABCDEFG, vw01234567, 3); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi1x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_laneq_f16(vo2p0, vi2x9ABCDEFG, vw01234567, 3); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi2x9ABCDEFG, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1x9ABCDEFG, vw01234567, 6); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_laneq_f16(vo1p0, vi2x9ABCDEFG, vw01234567, 6); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi2x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_laneq_f16(vo2p0, vi3x9ABCDEFG, vw01234567, 6); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi3x9ABCDEFG, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1); #endif #if XNN_ARCH_ARM64 vo1p0 = vfmaq_lane_f16(vo1p0, vi3x9ABCDEFG, vw89, 1); #else vo1p0 = vmlaq_lane_f16(vo1p0, vi3x9ABCDEFG, vw89, 1); #endif #if XNN_ARCH_ARM64 vo2p0 = vfmaq_lane_f16(vo2p0, vi4x9ABCDEFG, vw89, 1); #else vo2p0 = vmlaq_lane_f16(vo2p0, vi4x9ABCDEFG, vw89, 1); #endif float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); float16x8_t vo1 = vmaxq_f16(vo1p0, vmin); float16x8_t vo2 = vmaxq_f16(vo2p0, vmin); vo0 = vminq_f16(vo0, vmax); vo1 = vminq_f16(vo1, vmax); vo2 = vminq_f16(vo2, vmax); if XNN_LIKELY(w == 8 * sizeof(uint16_t)) { vst1q_u16(o2, vreinterpretq_u16_f16(vo2)); o2 += 8; vst1q_u16(o1, vreinterpretq_u16_f16(vo1)); o1 += 8; vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } else { float16x4_t vo2_lo = vget_low_f16(vo2); float16x4_t vo1_lo = vget_low_f16(vo1); float16x4_t vo0_lo = vget_low_f16(vo0); if (w & (4 * sizeof(uint16_t))) { vst1_u16(o2, vreinterpret_u16_f16(vo2_lo)); o2 += 4; vst1_u16(o1, vreinterpret_u16_f16(vo1_lo)); o1 += 4; vst1_u16(o0, vreinterpret_u16_f16(vo0_lo)); o0 += 4; vo2_lo = vget_high_f16(vo2); vo1_lo = vget_high_f16(vo1); vo0_lo = vget_high_f16(vo0); } if (w & (2 * sizeof(uint16_t))) { vst1_lane_u32((void*) o2, vreinterpret_u32_f16(vo2_lo), 0); o2 += 2; vst1_lane_u32((void*) o1, vreinterpret_u32_f16(vo1_lo), 0); o1 += 2; vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vo0_lo), 0); o0 += 2; vo0_lo = vext_f16(vo0_lo, vo0_lo, 2); vo1_lo = vext_f16(vo1_lo, vo1_lo, 2); vo2_lo = vext_f16(vo2_lo, 
vo2_lo, 2); } if (w & (1 * sizeof(uint16_t))) { vst1_lane_u16(o2, vreinterpret_u16_f16(vo2_lo), 0); o2 += 1; vst1_lane_u16(o1, vreinterpret_u16_f16(vo1_lo), 0); o1 += 1; vst1_lane_u16(o0, vreinterpret_u16_f16(vo0_lo), 0); o0 += 1; } } } i0 = (const uint16_t*) ((uintptr_t) i3 - input_decrement); i1 = (const uint16_t*) ((uintptr_t) i4 - input_decrement); i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); i3 = (const uint16_t*) ((uintptr_t) i2 + input_width); i4 = (const uint16_t*) ((uintptr_t) i3 + input_width); o0 = o2; o1 = (uint16_t*) ((uintptr_t) o0 + input_width); o2 = (uint16_t*) ((uintptr_t) o1 + input_width); output_height = doz(output_height, 3); } while (output_height != 0); }
20,224
39.61245
98
c
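Both the 2x8 and 3x8 variants handle the final 1..8 output pixels by AND-ing the input vectors with params->neonfp16arith_stride1.mask before the multiply-adds, so out-of-bounds lanes (the kernels are annotated XNN_OOB_READS) contribute +0.0 to the accumulators. Below is a hedged sketch of how such a lane mask could be built; XNNPACK fills these params in its init/update code, and this helper is an illustrative assumption, not that code.

#include <stddef.h>
#include <stdint.h>

// Keep the lanes that hold valid pixels, zero the rest; 0x0000 reinterpreted
// as fp16 is +0.0, so masked lanes vanish from the fused multiply-adds.
static void build_tail_mask_u16(uint16_t mask[8], size_t remaining /* 1..8 */) {
  for (size_t i = 0; i < 8; i++) {
    mask[i] = (i < remaining) ? 0xFFFF : 0x0000;
  }
}

The store path then writes the surviving 4-, 2- and 1-element pieces of the clamped result, as in the bit-test cascade on w above.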
XNNPACK
XNNPACK-master/src/f16-dwconv2d-chw/gen/f16-dwconv2d-chw-3x3s2p1-minmax-neonfp16arith-1x8-acc2.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv2d-chw/3x3s2p1-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_f16_dwconv2d_chw_ukernel_3x3s2p1__neonfp16arith_1x8_acc2( size_t input_height, size_t input_width, const void* input, const void* weights, const void* zero, void* output, uint32_t padding_top, const union xnn_f16_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(input_height != 0); assert(input_width != 0); assert(input_width % sizeof(uint16_t) == 0); assert(padding_top <= 1); #if XNN_ARCH_ARM64 const uint16x8x2_t vminmax = vld2q_dup_u16(&params->neonfp16arith_stride2.min); const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]); const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]); #else // vld2_dup is to work around aarch32 clang bug with vld1q_dup const uint16x4x2_t vminmax = vld2_dup_u16(&params->neonfp16arith_stride2.min); const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0])); const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1])); #endif const uint16x8_t vmask_even = vld1q_u16(params->neonfp16arith_stride2.mask_even); const uint16x8_t vmask_odd = vld1q_u16(params->neonfp16arith_stride2.mask_odd); const uint16_t* w = (const uint16_t*) weights; const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x4_t vw89 = vreinterpret_f16_u32(vld1_dup_u32((const void*) (w + 8))); const size_t input_decrement = round_down_po2(input_width, 8 /* SIMD output width */ * 2 /* subsampling */ * sizeof(uint16_t)); const uint16_t* i0 = (const uint16_t*) ((uintptr_t) input - ((-padding_top) & input_width)); const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_width); if XNN_UNPREDICTABLE(padding_top != 0) { i0 = zero; } const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); uint16_t* o0 = output; size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */; size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2; do { if XNN_UNPREDICTABLE(padded_input_height < 4) { i2 = zero; } float16x8_t vi0x13579BDF = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi1x13579BDF = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi2x13579BDF = vreinterpretq_f16_u16(vmovq_n_u16(0)); size_t w = input_width; for (; w >= 16 * sizeof(uint16_t); w -= 16 * sizeof(uint16_t)) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const uint16x8x2_t vi0xGIKMOQSUHJLNPRTV = vld2q_u16(i0); i0 += 16; const uint16x8x2_t vi1xGIKMOQSUHJLNPRTV = vld2q_u16(i1); i1 += 16; const uint16x8x2_t vi2xGIKMOQSUHJLNPRTV = vld2q_u16(i2); i2 += 16; // Center column float16x8_t vo0p1 = vmulq_lane_f16(vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[0]), vget_low_f16(vw01234567), 2); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[0]), vw01234567, 5); #else vo0p0 = vmlaq_lane_f16(vo0p0, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[0]), vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[0]), vw89, 0); #else vo0p0 = vmlaq_lane_f16(vo0p0, 
vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[0]), vw89, 0); #endif // Left column const float16x8_t vi0xFHJLNPRT = vextq_f16(vi0x13579BDF, vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]), 7); vi0x13579BDF = vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]); const float16x8_t vi1xFHJLNPRT = vextq_f16(vi1x13579BDF, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]), 7); vi1x13579BDF = vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]); const float16x8_t vi2xFHJLNPRT = vextq_f16(vi2x13579BDF, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]), 7); vi2x13579BDF = vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]); #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi0xFHJLNPRT, vw01234567, 1); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi0xFHJLNPRT, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1xFHJLNPRT, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1xFHJLNPRT, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi2xFHJLNPRT, vw01234567, 7); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi2xFHJLNPRT, vget_high_f16(vw01234567), 3); #endif // Right column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]), vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]), vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]), vw01234567, 6); #else vo0p1 = vmlaq_lane_f16(vo0p1, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]), vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]), vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]), vw89, 1); #endif vo0p0 = vaddq_f16(vo0p0, vo0p1); float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } // Last block has 0-15 pixels to process. 
assert(w < 16 * sizeof(uint16_t)); if XNN_LIKELY(w != 0) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const uint16x8x2_t vi0xGIKMOQSUHJLNPRTV = vld2q_u16(i0); const uint16x8x2_t vi1xGIKMOQSUHJLNPRTV = vld2q_u16(i1); const uint16x8x2_t vi2xGIKMOQSUHJLNPRTV = vld2q_u16(i2); const float16x8_t vi0xGIKMOQSU = vreinterpretq_f16_u16(vandq_u16(vmask_even, vi0xGIKMOQSUHJLNPRTV.val[0])); const float16x8_t vi0xHJLNPRTV = vreinterpretq_f16_u16(vandq_u16(vmask_odd, vi0xGIKMOQSUHJLNPRTV.val[1])); const float16x8_t vi1xGIKMOQSU = vreinterpretq_f16_u16(vandq_u16(vmask_even, vi1xGIKMOQSUHJLNPRTV.val[0])); const float16x8_t vi1xHJLNPRTV = vreinterpretq_f16_u16(vandq_u16(vmask_odd, vi1xGIKMOQSUHJLNPRTV.val[1])); const float16x8_t vi2xGIKMOQSU = vreinterpretq_f16_u16(vandq_u16(vmask_even, vi2xGIKMOQSUHJLNPRTV.val[0])); const float16x8_t vi2xHJLNPRTV = vreinterpretq_f16_u16(vandq_u16(vmask_odd, vi2xGIKMOQSUHJLNPRTV.val[1])); // Center column float16x8_t vo0p1 = vmulq_lane_f16(vi0xGIKMOQSU, vget_low_f16(vw01234567), 2); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1xGIKMOQSU, vw01234567, 5); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1xGIKMOQSU, vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2xGIKMOQSU, vw89, 0); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2xGIKMOQSU, vw89, 0); #endif // Left column const float16x8_t vi0xFHJLNPRT = vextq_f16(vi0x13579BDF, vi0xHJLNPRTV, 7); const float16x8_t vi1xFHJLNPRT = vextq_f16(vi1x13579BDF, vi1xHJLNPRTV, 7); const float16x8_t vi2xFHJLNPRT = vextq_f16(vi2x13579BDF, vi2xHJLNPRTV, 7); #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi0xFHJLNPRT, vw01234567, 1); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi0xFHJLNPRT, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1xFHJLNPRT, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1xFHJLNPRT, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi2xFHJLNPRT, vw01234567, 7); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi2xFHJLNPRT, vget_high_f16(vw01234567), 3); #endif // Right column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0xHJLNPRTV, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0xHJLNPRTV, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi1xHJLNPRTV, vw01234567, 6); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi1xHJLNPRTV, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2xHJLNPRTV, vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2xHJLNPRTV, vw89, 1); #endif vo0p0 = vaddq_f16(vo0p0, vo0p1); float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); w += 1 * sizeof(uint16_t); if XNN_LIKELY(w == 16 * sizeof(uint16_t)) { vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } else { float16x4_t vo0_lo = vget_low_f16(vo0); if (w & (8 * sizeof(uint16_t))) { vst1_u16(o0, vreinterpret_u16_f16(vo0_lo)); o0 += 4; vo0_lo = vget_high_f16(vo0); } if (w & (4 * sizeof(uint16_t))) { vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vo0_lo), 0); o0 += 2; vo0_lo = vext_f16(vo0_lo, vo0_lo, 2); } if (w & (2 * sizeof(uint16_t))) { vst1_lane_u16(o0, vreinterpret_u16_f16(vo0_lo), 0); o0 += 1; } } } i0 = (const uint16_t*) ((uintptr_t) i2 - input_decrement); i1 = (const uint16_t*) ((uintptr_t) i0 + input_width); i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); output_height -= 1; padded_input_height -= 2; } while (output_height != 0); }
10,172
40.864198
129
c
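The _acc2 suffix means each output pixel's sum is split across two accumulators (vo0p0 and vo0p1, merged by a single vaddq_f16 before clamping) so consecutive fused multiply-adds land on independent dependency chains. A scalar sketch of the same idea, purely as an illustration of the technique rather than the kernel's code:

// Hedged sketch: a 9-term dot product split over two accumulators. The
// reassociation is the point -- it shortens the add/FMA dependency chain,
// at the cost of results that may differ from strict left-to-right order
// in the last bit.
static float dot9_acc2(const float a[9], const float b[9]) {
  float acc0 = 0.0f;
  float acc1 = 0.0f;
  for (int i = 0; i < 8; i += 2) {
    acc0 += a[i] * b[i];          // chain 0
    acc1 += a[i + 1] * b[i + 1];  // chain 1
  }
  acc0 += a[8] * b[8];            // odd ninth term
  return acc0 + acc1;             // single combine, like vaddq_f16 above
}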
XNNPACK
XNNPACK-master/src/f16-dwconv2d-chw/gen/f16-dwconv2d-chw-3x3s2p1-minmax-neonfp16arith-1x8-acc3.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv2d-chw/3x3s2p1-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_f16_dwconv2d_chw_ukernel_3x3s2p1__neonfp16arith_1x8_acc3( size_t input_height, size_t input_width, const void* input, const void* weights, const void* zero, void* output, uint32_t padding_top, const union xnn_f16_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(input_height != 0); assert(input_width != 0); assert(input_width % sizeof(uint16_t) == 0); assert(padding_top <= 1); #if XNN_ARCH_ARM64 const uint16x8x2_t vminmax = vld2q_dup_u16(&params->neonfp16arith_stride2.min); const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]); const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]); #else // vld2_dup is to work around aarch32 clang bug with vld1q_dup const uint16x4x2_t vminmax = vld2_dup_u16(&params->neonfp16arith_stride2.min); const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0])); const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1])); #endif const uint16x8_t vmask_even = vld1q_u16(params->neonfp16arith_stride2.mask_even); const uint16x8_t vmask_odd = vld1q_u16(params->neonfp16arith_stride2.mask_odd); const uint16_t* w = (const uint16_t*) weights; const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x4_t vw89 = vreinterpret_f16_u32(vld1_dup_u32((const void*) (w + 8))); const size_t input_decrement = round_down_po2(input_width, 8 /* SIMD output width */ * 2 /* subsampling */ * sizeof(uint16_t)); const uint16_t* i0 = (const uint16_t*) ((uintptr_t) input - ((-padding_top) & input_width)); const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_width); if XNN_UNPREDICTABLE(padding_top != 0) { i0 = zero; } const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); uint16_t* o0 = output; size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */; size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2; do { if XNN_UNPREDICTABLE(padded_input_height < 4) { i2 = zero; } float16x8_t vi0x13579BDF = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi1x13579BDF = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi2x13579BDF = vreinterpretq_f16_u16(vmovq_n_u16(0)); size_t w = input_width; for (; w >= 16 * sizeof(uint16_t); w -= 16 * sizeof(uint16_t)) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const uint16x8x2_t vi0xGIKMOQSUHJLNPRTV = vld2q_u16(i0); i0 += 16; const uint16x8x2_t vi1xGIKMOQSUHJLNPRTV = vld2q_u16(i1); i1 += 16; const uint16x8x2_t vi2xGIKMOQSUHJLNPRTV = vld2q_u16(i2); i2 += 16; // Center column float16x8_t vo0p1 = vmulq_lane_f16(vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[0]), vget_low_f16(vw01234567), 2); float16x8_t vo0p2 = vmulq_lane_f16(vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[0]), vget_high_f16(vw01234567), 1); #if XNN_ARCH_ARM64 vo0p1 = vfmaq_lane_f16(vo0p1, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[0]), vw89, 0); #else vo0p1 = vmlaq_lane_f16(vo0p1, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[0]), vw89, 0); #endif // Left column const float16x8_t vi0xFHJLNPRT = vextq_f16(vi0x13579BDF, 
vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]), 7); vi0x13579BDF = vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]); const float16x8_t vi1xFHJLNPRT = vextq_f16(vi1x13579BDF, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]), 7); vi1x13579BDF = vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]); const float16x8_t vi2xFHJLNPRT = vextq_f16(vi2x13579BDF, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]), 7); vi2x13579BDF = vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]); #if XNN_ARCH_ARM64 vo0p2 = vfmaq_laneq_f16(vo0p2, vi0xFHJLNPRT, vw01234567, 1); #else vo0p2 = vmlaq_lane_f16(vo0p2, vi0xFHJLNPRT, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1xFHJLNPRT, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1xFHJLNPRT, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi2xFHJLNPRT, vw01234567, 7); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi2xFHJLNPRT, vget_high_f16(vw01234567), 3); #endif // Right column #if XNN_ARCH_ARM64 vo0p2 = vfmaq_laneq_f16(vo0p2, vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]), vw01234567, 3); #else vo0p2 = vmlaq_lane_f16(vo0p2, vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]), vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]), vw01234567, 6); #else vo0p0 = vmlaq_lane_f16(vo0p0, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]), vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_lane_f16(vo0p1, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]), vw89, 1); #else vo0p1 = vmlaq_lane_f16(vo0p1, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]), vw89, 1); #endif vo0p0 = vaddq_f16(vo0p0, vo0p1); vo0p0 = vaddq_f16(vo0p0, vo0p2); float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } // Last block has 0-15 pixels to process. 
assert(w < 16 * sizeof(uint16_t)); if XNN_LIKELY(w != 0) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const uint16x8x2_t vi0xGIKMOQSUHJLNPRTV = vld2q_u16(i0); const uint16x8x2_t vi1xGIKMOQSUHJLNPRTV = vld2q_u16(i1); const uint16x8x2_t vi2xGIKMOQSUHJLNPRTV = vld2q_u16(i2); const float16x8_t vi0xGIKMOQSU = vreinterpretq_f16_u16(vandq_u16(vmask_even, vi0xGIKMOQSUHJLNPRTV.val[0])); const float16x8_t vi0xHJLNPRTV = vreinterpretq_f16_u16(vandq_u16(vmask_odd, vi0xGIKMOQSUHJLNPRTV.val[1])); const float16x8_t vi1xGIKMOQSU = vreinterpretq_f16_u16(vandq_u16(vmask_even, vi1xGIKMOQSUHJLNPRTV.val[0])); const float16x8_t vi1xHJLNPRTV = vreinterpretq_f16_u16(vandq_u16(vmask_odd, vi1xGIKMOQSUHJLNPRTV.val[1])); const float16x8_t vi2xGIKMOQSU = vreinterpretq_f16_u16(vandq_u16(vmask_even, vi2xGIKMOQSUHJLNPRTV.val[0])); const float16x8_t vi2xHJLNPRTV = vreinterpretq_f16_u16(vandq_u16(vmask_odd, vi2xGIKMOQSUHJLNPRTV.val[1])); // Center column float16x8_t vo0p1 = vmulq_lane_f16(vi0xGIKMOQSU, vget_low_f16(vw01234567), 2); float16x8_t vo0p2 = vmulq_lane_f16(vi1xGIKMOQSU, vget_high_f16(vw01234567), 1); #if XNN_ARCH_ARM64 vo0p1 = vfmaq_lane_f16(vo0p1, vi2xGIKMOQSU, vw89, 0); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi2xGIKMOQSU, vw89, 0); #endif // Left column const float16x8_t vi0xFHJLNPRT = vextq_f16(vi0x13579BDF, vi0xHJLNPRTV, 7); const float16x8_t vi1xFHJLNPRT = vextq_f16(vi1x13579BDF, vi1xHJLNPRTV, 7); const float16x8_t vi2xFHJLNPRT = vextq_f16(vi2x13579BDF, vi2xHJLNPRTV, 7); #if XNN_ARCH_ARM64 vo0p2 = vfmaq_laneq_f16(vo0p2, vi0xFHJLNPRT, vw01234567, 1); #else vo0p2 = vmlaq_lane_f16(vo0p2, vi0xFHJLNPRT, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1xFHJLNPRT, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1xFHJLNPRT, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi2xFHJLNPRT, vw01234567, 7); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi2xFHJLNPRT, vget_high_f16(vw01234567), 3); #endif // Right column #if XNN_ARCH_ARM64 vo0p2 = vfmaq_laneq_f16(vo0p2, vi0xHJLNPRTV, vw01234567, 3); #else vo0p2 = vmlaq_lane_f16(vo0p2, vi0xHJLNPRTV, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1xHJLNPRTV, vw01234567, 6); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1xHJLNPRTV, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_lane_f16(vo0p1, vi2xHJLNPRTV, vw89, 1); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi2xHJLNPRTV, vw89, 1); #endif vo0p0 = vaddq_f16(vo0p0, vo0p1); vo0p0 = vaddq_f16(vo0p0, vo0p2); float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); w += 1 * sizeof(uint16_t); if XNN_LIKELY(w == 16 * sizeof(uint16_t)) { vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } else { float16x4_t vo0_lo = vget_low_f16(vo0); if (w & (8 * sizeof(uint16_t))) { vst1_u16(o0, vreinterpret_u16_f16(vo0_lo)); o0 += 4; vo0_lo = vget_high_f16(vo0); } if (w & (4 * sizeof(uint16_t))) { vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vo0_lo), 0); o0 += 2; vo0_lo = vext_f16(vo0_lo, vo0_lo, 2); } if (w & (2 * sizeof(uint16_t))) { vst1_lane_u16(o0, vreinterpret_u16_f16(vo0_lo), 0); o0 += 1; } } } i0 = (const uint16_t*) ((uintptr_t) i2 - input_decrement); i1 = (const uint16_t*) ((uintptr_t) i0 + input_width); i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); output_height -= 1; padded_input_height -= 2; } while (output_height != 0); }
9,980
41.113924
129
c
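The stride-2 kernels (this one and the _acc2 variant above) load 16 consecutive input pixels with vld2q_u16, which deinterleaves them on the fly: even-indexed pixels land in .val[0] (the GIKMOQSU names) and odd-indexed pixels in .val[1] (HJLNPRTV). The even half feeds the center taps directly, while the odd half is vextq-shifted by 7 for the left taps and used as-is for the right taps. In scalar terms, as an illustration only:

#include <stdint.h>

// Hedged sketch of the vld2q_u16 deinterleave: one NEON load splits 16
// consecutive uint16_t pixels into the even- and odd-indexed halves that
// the stride-2 kernel needs.
static void deinterleave16(const uint16_t in[16], uint16_t even[8], uint16_t odd[8]) {
  for (int i = 0; i < 8; i++) {
    even[i] = in[2 * i];      // .val[0]: conv centers at stride 2
    odd[i]  = in[2 * i + 1];  // .val[1]: left/right neighbor taps
  }
}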
XNNPACK
XNNPACK-master/src/f16-dwconv2d-chw/gen/f16-dwconv2d-chw-3x3s2p1-minmax-neonfp16arith-1x8-acc4.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv2d-chw/3x3s2p1-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_f16_dwconv2d_chw_ukernel_3x3s2p1__neonfp16arith_1x8_acc4( size_t input_height, size_t input_width, const void* input, const void* weights, const void* zero, void* output, uint32_t padding_top, const union xnn_f16_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(input_height != 0); assert(input_width != 0); assert(input_width % sizeof(uint16_t) == 0); assert(padding_top <= 1); #if XNN_ARCH_ARM64 const uint16x8x2_t vminmax = vld2q_dup_u16(&params->neonfp16arith_stride2.min); const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]); const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]); #else // vld2_dup is to work around aarch32 clang bug with vld1q_dup const uint16x4x2_t vminmax = vld2_dup_u16(&params->neonfp16arith_stride2.min); const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0])); const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1])); #endif const uint16x8_t vmask_even = vld1q_u16(params->neonfp16arith_stride2.mask_even); const uint16x8_t vmask_odd = vld1q_u16(params->neonfp16arith_stride2.mask_odd); const uint16_t* w = (const uint16_t*) weights; const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x4_t vw89 = vreinterpret_f16_u32(vld1_dup_u32((const void*) (w + 8))); const size_t input_decrement = round_down_po2(input_width, 8 /* SIMD output width */ * 2 /* subsampling */ * sizeof(uint16_t)); const uint16_t* i0 = (const uint16_t*) ((uintptr_t) input - ((-padding_top) & input_width)); const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_width); if XNN_UNPREDICTABLE(padding_top != 0) { i0 = zero; } const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); uint16_t* o0 = output; size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */; size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2; do { if XNN_UNPREDICTABLE(padded_input_height < 4) { i2 = zero; } float16x8_t vi0x13579BDF = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi1x13579BDF = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi2x13579BDF = vreinterpretq_f16_u16(vmovq_n_u16(0)); size_t w = input_width; for (; w >= 16 * sizeof(uint16_t); w -= 16 * sizeof(uint16_t)) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const uint16x8x2_t vi0xGIKMOQSUHJLNPRTV = vld2q_u16(i0); i0 += 16; const uint16x8x2_t vi1xGIKMOQSUHJLNPRTV = vld2q_u16(i1); i1 += 16; const uint16x8x2_t vi2xGIKMOQSUHJLNPRTV = vld2q_u16(i2); i2 += 16; // Center column float16x8_t vo0p1 = vmulq_lane_f16(vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[0]), vget_low_f16(vw01234567), 2); float16x8_t vo0p2 = vmulq_lane_f16(vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[0]), vget_high_f16(vw01234567), 1); float16x8_t vo0p3 = vmulq_lane_f16(vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[0]), vw89, 0); // Left column const float16x8_t vi0xFHJLNPRT = vextq_f16(vi0x13579BDF, vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]), 7); vi0x13579BDF = vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]); const 
float16x8_t vi1xFHJLNPRT = vextq_f16(vi1x13579BDF, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]), 7); vi1x13579BDF = vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]); const float16x8_t vi2xFHJLNPRT = vextq_f16(vi2x13579BDF, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]), 7); vi2x13579BDF = vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]); #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi0xFHJLNPRT, vw01234567, 1); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi0xFHJLNPRT, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p2 = vfmaq_laneq_f16(vo0p2, vi1xFHJLNPRT, vw01234567, 4); #else vo0p2 = vmlaq_lane_f16(vo0p2, vi1xFHJLNPRT, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p3 = vfmaq_laneq_f16(vo0p3, vi2xFHJLNPRT, vw01234567, 7); #else vo0p3 = vmlaq_lane_f16(vo0p3, vi2xFHJLNPRT, vget_high_f16(vw01234567), 3); #endif // Right column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]), vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]), vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]), vw01234567, 6); #else vo0p1 = vmlaq_lane_f16(vo0p1, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]), vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p2 = vfmaq_lane_f16(vo0p2, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]), vw89, 1); #else vo0p2 = vmlaq_lane_f16(vo0p2, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]), vw89, 1); #endif vo0p0 = vaddq_f16(vo0p0, vo0p1); vo0p2 = vaddq_f16(vo0p2, vo0p3); vo0p0 = vaddq_f16(vo0p0, vo0p2); float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } // Last block has 0-15 pixels to process. 
assert(w < 16 * sizeof(uint16_t)); if XNN_LIKELY(w != 0) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const uint16x8x2_t vi0xGIKMOQSUHJLNPRTV = vld2q_u16(i0); const uint16x8x2_t vi1xGIKMOQSUHJLNPRTV = vld2q_u16(i1); const uint16x8x2_t vi2xGIKMOQSUHJLNPRTV = vld2q_u16(i2); const float16x8_t vi0xGIKMOQSU = vreinterpretq_f16_u16(vandq_u16(vmask_even, vi0xGIKMOQSUHJLNPRTV.val[0])); const float16x8_t vi0xHJLNPRTV = vreinterpretq_f16_u16(vandq_u16(vmask_odd, vi0xGIKMOQSUHJLNPRTV.val[1])); const float16x8_t vi1xGIKMOQSU = vreinterpretq_f16_u16(vandq_u16(vmask_even, vi1xGIKMOQSUHJLNPRTV.val[0])); const float16x8_t vi1xHJLNPRTV = vreinterpretq_f16_u16(vandq_u16(vmask_odd, vi1xGIKMOQSUHJLNPRTV.val[1])); const float16x8_t vi2xGIKMOQSU = vreinterpretq_f16_u16(vandq_u16(vmask_even, vi2xGIKMOQSUHJLNPRTV.val[0])); const float16x8_t vi2xHJLNPRTV = vreinterpretq_f16_u16(vandq_u16(vmask_odd, vi2xGIKMOQSUHJLNPRTV.val[1])); // Center column float16x8_t vo0p1 = vmulq_lane_f16(vi0xGIKMOQSU, vget_low_f16(vw01234567), 2); float16x8_t vo0p2 = vmulq_lane_f16(vi1xGIKMOQSU, vget_high_f16(vw01234567), 1); float16x8_t vo0p3 = vmulq_lane_f16(vi2xGIKMOQSU, vw89, 0); // Left column const float16x8_t vi0xFHJLNPRT = vextq_f16(vi0x13579BDF, vi0xHJLNPRTV, 7); const float16x8_t vi1xFHJLNPRT = vextq_f16(vi1x13579BDF, vi1xHJLNPRTV, 7); const float16x8_t vi2xFHJLNPRT = vextq_f16(vi2x13579BDF, vi2xHJLNPRTV, 7); #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi0xFHJLNPRT, vw01234567, 1); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi0xFHJLNPRT, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p2 = vfmaq_laneq_f16(vo0p2, vi1xFHJLNPRT, vw01234567, 4); #else vo0p2 = vmlaq_lane_f16(vo0p2, vi1xFHJLNPRT, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p3 = vfmaq_laneq_f16(vo0p3, vi2xFHJLNPRT, vw01234567, 7); #else vo0p3 = vmlaq_lane_f16(vo0p3, vi2xFHJLNPRT, vget_high_f16(vw01234567), 3); #endif // Right column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0xHJLNPRTV, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0xHJLNPRTV, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p1 = vfmaq_laneq_f16(vo0p1, vi1xHJLNPRTV, vw01234567, 6); #else vo0p1 = vmlaq_lane_f16(vo0p1, vi1xHJLNPRTV, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p2 = vfmaq_lane_f16(vo0p2, vi2xHJLNPRTV, vw89, 1); #else vo0p2 = vmlaq_lane_f16(vo0p2, vi2xHJLNPRTV, vw89, 1); #endif vo0p0 = vaddq_f16(vo0p0, vo0p1); vo0p2 = vaddq_f16(vo0p2, vo0p3); vo0p0 = vaddq_f16(vo0p0, vo0p2); float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); w += 1 * sizeof(uint16_t); if XNN_LIKELY(w == 16 * sizeof(uint16_t)) { vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } else { float16x4_t vo0_lo = vget_low_f16(vo0); if (w & (8 * sizeof(uint16_t))) { vst1_u16(o0, vreinterpret_u16_f16(vo0_lo)); o0 += 4; vo0_lo = vget_high_f16(vo0); } if (w & (4 * sizeof(uint16_t))) { vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vo0_lo), 0); o0 += 2; vo0_lo = vext_f16(vo0_lo, vo0_lo, 2); } if (w & (2 * sizeof(uint16_t))) { vst1_lane_u16(o0, vreinterpret_u16_f16(vo0_lo), 0); o0 += 1; } } } i0 = (const uint16_t*) ((uintptr_t) i2 - input_decrement); i1 = (const uint16_t*) ((uintptr_t) i0 + input_width); i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); output_height -= 1; padded_input_height -= 2; } while (output_height != 0); }
9,802
41.437229
129
c
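All of the 3x3s2p1 variants derive their row count the same way: padded_input_height = input_height + padding_top + 1 (one row of bottom padding is always assumed), then output_height = (padded_input_height - 3 + 2) / 2. As a worked check with hypothetical numbers, input_height = 5 and padding_top = 1 pad to 7 rows and give (7 - 3 + 2) / 2 = 3 output rows. A sketch of that arithmetic:

#include <stddef.h>
#include <stdint.h>

// Hedged sketch of the output-height computation the stride-2 kernels open
// with: 3x3 kernel, vertical stride 2, bottom padding of one row.
static size_t stride2_output_height(size_t input_height, uint32_t padding_top) {
  const size_t padded_input_height = input_height + padding_top + 1;
  return (padded_input_height - 3 + 2) / 2;  // e.g. 5 rows, pad 1 -> 3 rows
}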
XNNPACK
XNNPACK-master/src/f16-dwconv2d-chw/gen/f16-dwconv2d-chw-3x3s2p1-minmax-neonfp16arith-1x8.c
// Auto-generated file. Do not edit! // Template: src/f16-dwconv2d-chw/3x3s2p1-neonfp16arith.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/math.h> void xnn_f16_dwconv2d_chw_ukernel_3x3s2p1__neonfp16arith_1x8( size_t input_height, size_t input_width, const void* input, const void* weights, const void* zero, void* output, uint32_t padding_top, const union xnn_f16_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(input_height != 0); assert(input_width != 0); assert(input_width % sizeof(uint16_t) == 0); assert(padding_top <= 1); #if XNN_ARCH_ARM64 const uint16x8x2_t vminmax = vld2q_dup_u16(&params->neonfp16arith_stride2.min); const float16x8_t vmin = vreinterpretq_f16_u16(vminmax.val[0]); const float16x8_t vmax = vreinterpretq_f16_u16(vminmax.val[1]); #else // vld2_dup is to work around aarch32 clang bug with vld1q_dup const uint16x4x2_t vminmax = vld2_dup_u16(&params->neonfp16arith_stride2.min); const float16x8_t vmin = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[0], vminmax.val[0])); const float16x8_t vmax = vreinterpretq_f16_u16(vcombine_u16(vminmax.val[1], vminmax.val[1])); #endif const uint16x8_t vmask_even = vld1q_u16(params->neonfp16arith_stride2.mask_even); const uint16x8_t vmask_odd = vld1q_u16(params->neonfp16arith_stride2.mask_odd); const uint16_t* w = (const uint16_t*) weights; const float16x8_t vw01234567 = vreinterpretq_f16_u16(vld1q_u16(w)); const float16x4_t vw89 = vreinterpret_f16_u32(vld1_dup_u32((const void*) (w + 8))); const size_t input_decrement = round_down_po2(input_width, 8 /* SIMD output width */ * 2 /* subsampling */ * sizeof(uint16_t)); const uint16_t* i0 = (const uint16_t*) ((uintptr_t) input - ((-padding_top) & input_width)); const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_width); if XNN_UNPREDICTABLE(padding_top != 0) { i0 = zero; } const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); uint16_t* o0 = output; size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */; size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2; do { if XNN_UNPREDICTABLE(padded_input_height < 4) { i2 = zero; } float16x8_t vi0x13579BDF = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi1x13579BDF = vreinterpretq_f16_u16(vmovq_n_u16(0)); float16x8_t vi2x13579BDF = vreinterpretq_f16_u16(vmovq_n_u16(0)); size_t w = input_width; for (; w >= 16 * sizeof(uint16_t); w -= 16 * sizeof(uint16_t)) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const uint16x8x2_t vi0xGIKMOQSUHJLNPRTV = vld2q_u16(i0); i0 += 16; const uint16x8x2_t vi1xGIKMOQSUHJLNPRTV = vld2q_u16(i1); i1 += 16; const uint16x8x2_t vi2xGIKMOQSUHJLNPRTV = vld2q_u16(i2); i2 += 16; // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[0]), vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[0]), vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[0]), vw01234567, 5); #else vo0p0 = vmlaq_lane_f16(vo0p0, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[0]), vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, 
vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[0]), vw89, 0); #else vo0p0 = vmlaq_lane_f16(vo0p0, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[0]), vw89, 0); #endif // Left column const float16x8_t vi0xFHJLNPRT = vextq_f16(vi0x13579BDF, vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]), 7); vi0x13579BDF = vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]); const float16x8_t vi1xFHJLNPRT = vextq_f16(vi1x13579BDF, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]), 7); vi1x13579BDF = vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]); const float16x8_t vi2xFHJLNPRT = vextq_f16(vi2x13579BDF, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]), 7); vi2x13579BDF = vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0xFHJLNPRT, vw01234567, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0xFHJLNPRT, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1xFHJLNPRT, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1xFHJLNPRT, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi2xFHJLNPRT, vw01234567, 7); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2xFHJLNPRT, vget_high_f16(vw01234567), 3); #endif // Right column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]), vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vreinterpretq_f16_u16(vi0xGIKMOQSUHJLNPRTV.val[1]), vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]), vw01234567, 6); #else vo0p0 = vmlaq_lane_f16(vo0p0, vreinterpretq_f16_u16(vi1xGIKMOQSUHJLNPRTV.val[1]), vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]), vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vreinterpretq_f16_u16(vi2xGIKMOQSUHJLNPRTV.val[1]), vw89, 1); #endif float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } // Last block has 0-15 pixels to process. 
assert(w < 16 * sizeof(uint16_t)); if XNN_LIKELY(w != 0) { float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0); const uint16x8x2_t vi0xGIKMOQSUHJLNPRTV = vld2q_u16(i0); const uint16x8x2_t vi1xGIKMOQSUHJLNPRTV = vld2q_u16(i1); const uint16x8x2_t vi2xGIKMOQSUHJLNPRTV = vld2q_u16(i2); const float16x8_t vi0xGIKMOQSU = vreinterpretq_f16_u16(vandq_u16(vmask_even, vi0xGIKMOQSUHJLNPRTV.val[0])); const float16x8_t vi0xHJLNPRTV = vreinterpretq_f16_u16(vandq_u16(vmask_odd, vi0xGIKMOQSUHJLNPRTV.val[1])); const float16x8_t vi1xGIKMOQSU = vreinterpretq_f16_u16(vandq_u16(vmask_even, vi1xGIKMOQSUHJLNPRTV.val[0])); const float16x8_t vi1xHJLNPRTV = vreinterpretq_f16_u16(vandq_u16(vmask_odd, vi1xGIKMOQSUHJLNPRTV.val[1])); const float16x8_t vi2xGIKMOQSU = vreinterpretq_f16_u16(vandq_u16(vmask_even, vi2xGIKMOQSUHJLNPRTV.val[0])); const float16x8_t vi2xHJLNPRTV = vreinterpretq_f16_u16(vandq_u16(vmask_odd, vi2xGIKMOQSUHJLNPRTV.val[1])); // Center column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0xGIKMOQSU, vw01234567, 2); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0xGIKMOQSU, vget_low_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1xGIKMOQSU, vw01234567, 5); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1xGIKMOQSU, vget_high_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2xGIKMOQSU, vw89, 0); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2xGIKMOQSU, vw89, 0); #endif // Left column const float16x8_t vi0xFHJLNPRT = vextq_f16(vi0x13579BDF, vi0xHJLNPRTV, 7); const float16x8_t vi1xFHJLNPRT = vextq_f16(vi1x13579BDF, vi1xHJLNPRTV, 7); const float16x8_t vi2xFHJLNPRT = vextq_f16(vi2x13579BDF, vi2xHJLNPRTV, 7); #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0xFHJLNPRT, vw01234567, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0xFHJLNPRT, vget_low_f16(vw01234567), 1); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1xFHJLNPRT, vw01234567, 4); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1xFHJLNPRT, vget_high_f16(vw01234567), 0); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi2xFHJLNPRT, vw01234567, 7); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2xFHJLNPRT, vget_high_f16(vw01234567), 3); #endif // Right column #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi0xHJLNPRTV, vw01234567, 3); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi0xHJLNPRTV, vget_low_f16(vw01234567), 3); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_laneq_f16(vo0p0, vi1xHJLNPRTV, vw01234567, 6); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi1xHJLNPRTV, vget_high_f16(vw01234567), 2); #endif #if XNN_ARCH_ARM64 vo0p0 = vfmaq_lane_f16(vo0p0, vi2xHJLNPRTV, vw89, 1); #else vo0p0 = vmlaq_lane_f16(vo0p0, vi2xHJLNPRTV, vw89, 1); #endif float16x8_t vo0 = vmaxq_f16(vo0p0, vmin); vo0 = vminq_f16(vo0, vmax); w += 1 * sizeof(uint16_t); if XNN_LIKELY(w == 16 * sizeof(uint16_t)) { vst1q_u16(o0, vreinterpretq_u16_f16(vo0)); o0 += 8; } else { float16x4_t vo0_lo = vget_low_f16(vo0); if (w & (8 * sizeof(uint16_t))) { vst1_u16(o0, vreinterpret_u16_f16(vo0_lo)); o0 += 4; vo0_lo = vget_high_f16(vo0); } if (w & (4 * sizeof(uint16_t))) { vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vo0_lo), 0); o0 += 2; vo0_lo = vext_f16(vo0_lo, vo0_lo, 2); } if (w & (2 * sizeof(uint16_t))) { vst1_lane_u16(o0, vreinterpret_u16_f16(vo0_lo), 0); o0 += 1; } } } i0 = (const uint16_t*) ((uintptr_t) i2 - input_decrement); i1 = (const uint16_t*) ((uintptr_t) i0 + input_width); i2 = (const uint16_t*) ((uintptr_t) i1 + input_width); output_height -= 1; padded_input_height -= 2; } while (output_height != 0); }
10,359
40.606426
129
c
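Note: the kernel above evaluates a 3x3 depthwise convolution with stride 2 and a single pixel of top/left padding; vld2q_u16 splits each input row into even and odd columns so that the left/center/right taps of every output pixel land in separate vectors, and the weight block is laid out as one bias followed by the nine taps in row-major order. A minimal scalar reference for one output row under those layout assumptions (the names, and float standing in for fp16, are ours):

#include <stddef.h>

// Scalar reference for one output row of a 3x3, stride-2, pad-1 depthwise
// convolution. w[0] is the bias; w[1..9] are the taps in row-major order.
// float stands in for fp16 to keep the sketch portable.
static void dwconv_3x3s2p1_row_ref(
    size_t input_width, size_t output_width,
    const float* i0, const float* i1, const float* i2,  // three input rows
    const float* w, float* out, float out_min, float out_max)
{
  const float* rows[3] = { i0, i1, i2 };
  for (size_t x = 0; x < output_width; x++) {
    float acc = w[0];
    for (int ky = 0; ky < 3; ky++) {
      for (int kx = 0; kx < 3; kx++) {
        // ix == -1 models the single pixel of left padding; reads past the
        // right edge (masked in the vector kernel) are treated as zero too.
        const ptrdiff_t ix = (ptrdiff_t) (2 * x) + kx - 1;
        const float vi = (ix < 0 || ix >= (ptrdiff_t) input_width)
            ? 0.0f : rows[ky][ix];
        acc += vi * w[1 + 3 * ky + kx];
      }
    }
    acc = acc < out_min ? out_min : acc;   // the vmaxq_f16 clamp
    acc = acc > out_max ? out_max : acc;   // the vminq_f16 clamp
    out[x] = acc;
  }
}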
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-avx-int16-x16.c
// Auto-generated file. Do not edit! // Template: src/f16-f32-vcvt/sse-int16.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/common.h> #include <xnnpack/vcvt.h> void xnn_f16_f32_vcvt_ukernel__avx_int16_x16( size_t batch, const void* input, float* output, const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask); const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset); const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale); const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask); const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias); const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff); const uint16_t* i = (const uint16_t*) input; for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) { const __m128i vh0 = _mm_loadu_si128((const __m128i*) i); const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8)); i += 16; const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask); const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask); const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0); const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1); const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13); const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset); const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13); const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset); const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale)); const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale)); const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale)); const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale)); const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias)); const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias)); const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias)); const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias)); const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff); const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff); const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0), _mm_blendv_epi8(vdenorm0, vnorm0, _mm_cvtepi16_epi32(vmask0))); const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0), _mm_blendv_epi8(vdenorm1, vnorm1, _mm_unpackhi_epi16(vmask0, vmask0))); const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1), _mm_blendv_epi8(vdenorm2, vnorm2, _mm_cvtepi16_epi32(vmask1))); const __m128i vf3 = 
_mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1), _mm_blendv_epi8(vdenorm3, vnorm3, _mm_unpackhi_epi16(vmask1, vmask1))); _mm_storeu_ps(output, _mm_castsi128_ps(vf0)); _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1)); _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2)); _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3)); output += 16; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vh = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vsign = _mm_and_si128(vh, vsign_mask); const __m128i vnonsign = _mm_xor_si128(vh, vsign); const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13); const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset); const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff); const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask))); const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask))); _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo)); _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi)); output += 8; } if XNN_UNPREDICTABLE(batch != 0) { const __m128i vh = _mm_loadu_si128((const __m128i*) i); const __m128i vsign = _mm_and_si128(vh, vsign_mask); const __m128i vnonsign = _mm_xor_si128(vh, vsign); const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13); const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset); const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff); __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask))); if (batch & (4 * sizeof(uint16_t))) { _mm_storeu_ps(output, _mm_castsi128_ps(vf)); output += 4; vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask))); } if (batch & (2 * sizeof(uint16_t))) { _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf)); output += 2; vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf))); } if (batch & (1 * sizeof(uint16_t))) { _mm_store_ss(output, _mm_castsi128_ps(vf)); } } }
7,289
47.926174
134
c
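Note: the vector arithmetic above is an integer-only half-to-float expansion. Interleaving the two 16-bit "prenorm" halves builds (nonsign << 13) + 0x70000000 in each 32-bit lane, which one multiply by 2**-112 rebiases into a normal float (fp16 bias 15 to fp32 bias 127), while the magic-mask path reconstructs subnormals as (0.5 + mantissa * 2**-24) - 0.5. A scalar mirror of the same math (a sketch; the names and helpers are ours, not XNNPACK's):

#include <stdint.h>
#include <string.h>

static inline float u32_as_f32(uint32_t w) { float f; memcpy(&f, &w, sizeof f); return f; }
static inline uint32_t f32_as_u32(float f) { uint32_t w; memcpy(&w, &f, sizeof w); return w; }

// Scalar mirror of the int16 conversion trick above.
static float f16_to_f32_int16_trick(uint16_t h) {
  const uint32_t sign    = (uint32_t) (h & UINT32_C(0x8000)) << 16;  // sign -> bit 31
  const uint32_t nonsign = h & UINT32_C(0x7FFF);
  // Normal/inf/NaN path: shift the fp16 fields into fp32 position, add
  // 0x70000000 and scale by 2**-112 to rebias the exponent.
  const uint32_t norm = f32_as_u32(
      u32_as_f32((nonsign << 13) + UINT32_C(0x70000000)) * 0x1.0p-112f);
  // Denormal path: OR the mantissa bits under the constant 0.5f and subtract
  // 0.5f, leaving mantissa * 2**-24 -- exactly the fp16 subnormal value.
  const uint32_t denorm = f32_as_u32(
      u32_as_f32(UINT32_C(0x3F000000) | nonsign) - 0.5f);
  // 0x0400 is the smallest normal fp16 payload; below it, take the denormal path.
  return u32_as_f32(sign | (nonsign < UINT32_C(0x0400) ? denorm : norm));
}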
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-avx-int16-x24.c
// Auto-generated file. Do not edit! // Template: src/f16-f32-vcvt/sse-int16.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/common.h> #include <xnnpack/vcvt.h> void xnn_f16_f32_vcvt_ukernel__avx_int16_x24( size_t batch, const void* input, float* output, const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask); const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset); const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale); const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask); const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias); const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff); const uint16_t* i = (const uint16_t*) input; for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) { const __m128i vh0 = _mm_loadu_si128((const __m128i*) i); const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16)); i += 24; const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask); const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask); const __m128i vsign2 = _mm_and_si128(vh2, vsign_mask); const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0); const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1); const __m128i vnonsign2 = _mm_xor_si128(vh2, vsign2); const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13); const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset); const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13); const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset); const __m128i vprenorm4 = _mm_slli_epi16(vnonsign2, 13); const __m128i vprenorm5 = _mm_add_epi16(_mm_srli_epi16(vnonsign2, 3), vexp_offset); const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale)); const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale)); const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale)); const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale)); const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm4, vprenorm5)), vexp_scale)); const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm4, vprenorm5)), vexp_scale)); const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias)); const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias)); const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias)); const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias)); const __m128i vdenorm4 = 
_mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign2, vmagic_mask)), vmagic_bias)); const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign2, vmagic_mask)), vmagic_bias)); const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff); const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff); const __m128i vmask2 = _mm_cmpgt_epi16(vnonsign2, vdenorm_cutoff); const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0), _mm_blendv_epi8(vdenorm0, vnorm0, _mm_cvtepi16_epi32(vmask0))); const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0), _mm_blendv_epi8(vdenorm1, vnorm1, _mm_unpackhi_epi16(vmask0, vmask0))); const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1), _mm_blendv_epi8(vdenorm2, vnorm2, _mm_cvtepi16_epi32(vmask1))); const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1), _mm_blendv_epi8(vdenorm3, vnorm3, _mm_unpackhi_epi16(vmask1, vmask1))); const __m128i vf4 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign2), _mm_blendv_epi8(vdenorm4, vnorm4, _mm_cvtepi16_epi32(vmask2))); const __m128i vf5 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign2), _mm_blendv_epi8(vdenorm5, vnorm5, _mm_unpackhi_epi16(vmask2, vmask2))); _mm_storeu_ps(output, _mm_castsi128_ps(vf0)); _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1)); _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2)); _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3)); _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4)); _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5)); output += 24; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vh = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vsign = _mm_and_si128(vh, vsign_mask); const __m128i vnonsign = _mm_xor_si128(vh, vsign); const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13); const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset); const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff); const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask))); const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask))); _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo)); _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi)); output += 8; } if XNN_UNPREDICTABLE(batch != 0) { const __m128i vh = _mm_loadu_si128((const __m128i*) i); const __m128i vsign = _mm_and_si128(vh, vsign_mask); const __m128i vnonsign = _mm_xor_si128(vh, vsign); const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13); const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset); const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vnorm_hi = 
_mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff); __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask))); if (batch & (4 * sizeof(uint16_t))) { _mm_storeu_ps(output, _mm_castsi128_ps(vf)); output += 4; vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask))); } if (batch & (2 * sizeof(uint16_t))) { _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf)); output += 2; vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf))); } if (batch & (1 * sizeof(uint16_t))) { _mm_store_ss(output, _mm_castsi128_ps(vf)); } } }
8,650
51.430303
134
c
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-avx-int16-x32.c
// Auto-generated file. Do not edit! // Template: src/f16-f32-vcvt/sse-int16.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/common.h> #include <xnnpack/vcvt.h> void xnn_f16_f32_vcvt_ukernel__avx_int16_x32( size_t batch, const void* input, float* output, const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask); const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset); const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale); const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask); const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias); const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff); const uint16_t* i = (const uint16_t*) input; for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) { const __m128i vh0 = _mm_loadu_si128((const __m128i*) i); const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vh3 = _mm_loadu_si128((const __m128i*) (i + 24)); i += 32; const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask); const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask); const __m128i vsign2 = _mm_and_si128(vh2, vsign_mask); const __m128i vsign3 = _mm_and_si128(vh3, vsign_mask); const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0); const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1); const __m128i vnonsign2 = _mm_xor_si128(vh2, vsign2); const __m128i vnonsign3 = _mm_xor_si128(vh3, vsign3); const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13); const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset); const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13); const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset); const __m128i vprenorm4 = _mm_slli_epi16(vnonsign2, 13); const __m128i vprenorm5 = _mm_add_epi16(_mm_srli_epi16(vnonsign2, 3), vexp_offset); const __m128i vprenorm6 = _mm_slli_epi16(vnonsign3, 13); const __m128i vprenorm7 = _mm_add_epi16(_mm_srli_epi16(vnonsign3, 3), vexp_offset); const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale)); const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale)); const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale)); const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale)); const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm4, vprenorm5)), vexp_scale)); const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm4, vprenorm5)), vexp_scale)); const __m128i vnorm6 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm6, vprenorm7)), vexp_scale)); const __m128i vnorm7 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm6, vprenorm7)), vexp_scale)); const 
__m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias)); const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias)); const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias)); const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias)); const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign2, vmagic_mask)), vmagic_bias)); const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign2, vmagic_mask)), vmagic_bias)); const __m128i vdenorm6 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign3, vmagic_mask)), vmagic_bias)); const __m128i vdenorm7 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign3, vmagic_mask)), vmagic_bias)); const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff); const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff); const __m128i vmask2 = _mm_cmpgt_epi16(vnonsign2, vdenorm_cutoff); const __m128i vmask3 = _mm_cmpgt_epi16(vnonsign3, vdenorm_cutoff); const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0), _mm_blendv_epi8(vdenorm0, vnorm0, _mm_cvtepi16_epi32(vmask0))); const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0), _mm_blendv_epi8(vdenorm1, vnorm1, _mm_unpackhi_epi16(vmask0, vmask0))); const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1), _mm_blendv_epi8(vdenorm2, vnorm2, _mm_cvtepi16_epi32(vmask1))); const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1), _mm_blendv_epi8(vdenorm3, vnorm3, _mm_unpackhi_epi16(vmask1, vmask1))); const __m128i vf4 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign2), _mm_blendv_epi8(vdenorm4, vnorm4, _mm_cvtepi16_epi32(vmask2))); const __m128i vf5 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign2), _mm_blendv_epi8(vdenorm5, vnorm5, _mm_unpackhi_epi16(vmask2, vmask2))); const __m128i vf6 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign3), _mm_blendv_epi8(vdenorm6, vnorm6, _mm_cvtepi16_epi32(vmask3))); const __m128i vf7 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign3), _mm_blendv_epi8(vdenorm7, vnorm7, _mm_unpackhi_epi16(vmask3, vmask3))); _mm_storeu_ps(output, _mm_castsi128_ps(vf0)); _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1)); _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2)); _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3)); _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4)); _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5)); _mm_storeu_ps(output + 24, _mm_castsi128_ps(vf6)); _mm_storeu_ps(output + 28, _mm_castsi128_ps(vf7)); output += 32; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vh = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vsign = _mm_and_si128(vh, vsign_mask); const __m128i vnonsign = _mm_xor_si128(vh, vsign); const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13); const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset); const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), 
vexp_scale)); const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff); const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask))); const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask))); _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo)); _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi)); output += 8; } if XNN_UNPREDICTABLE(batch != 0) { const __m128i vh = _mm_loadu_si128((const __m128i*) i); const __m128i vsign = _mm_and_si128(vh, vsign_mask); const __m128i vnonsign = _mm_xor_si128(vh, vsign); const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13); const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset); const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff); __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask))); if (batch & (4 * sizeof(uint16_t))) { _mm_storeu_ps(output, _mm_castsi128_ps(vf)); output += 4; vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask))); } if (batch & (2 * sizeof(uint16_t))) { _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf)); output += 2; vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf))); } if (batch & (1 * sizeof(uint16_t))) { _mm_store_ss(output, _mm_castsi128_ps(vf)); } } }
10,011
54.314917
134
c
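Note: the _x32 file above shows the tiering every unrolled variant in this family shares: a wide main loop, a one-vector (8-element) cleanup loop, and a remainder of 1..7 elements handled by testing individual bits of the byte count. The bit-test idiom in isolation, on plain scalars (a sketch):

#include <stddef.h>
#include <stdint.h>

// After the vector loops, batch holds 1..7 leftover uint16_t's (in bytes).
// Testing the 4-, 2-, and 1-element bits emits exactly that many outputs,
// mirroring the partial stores at the end of the kernel above.
static void copy_tail_u16(const uint16_t* i, uint16_t* o, size_t batch) {
  if (batch & (4 * sizeof(uint16_t))) {
    o[0] = i[0]; o[1] = i[1]; o[2] = i[2]; o[3] = i[3];
    i += 4; o += 4;
  }
  if (batch & (2 * sizeof(uint16_t))) {
    o[0] = i[0]; o[1] = i[1];
    i += 2; o += 2;
  }
  if (batch & (1 * sizeof(uint16_t))) {
    o[0] = i[0];
  }
}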
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-avx-int16-x8.c
// Auto-generated file. Do not edit! // Template: src/f16-f32-vcvt/sse-int16.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/common.h> #include <xnnpack/vcvt.h> void xnn_f16_f32_vcvt_ukernel__avx_int16_x8( size_t batch, const void* input, float* output, const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask); const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset); const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale); const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask); const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias); const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff); const uint16_t* i = (const uint16_t*) input; for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vh = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vsign = _mm_and_si128(vh, vsign_mask); const __m128i vnonsign = _mm_xor_si128(vh, vsign); const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13); const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset); const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff); const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask))); const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask))); _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo)); _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi)); output += 8; } if XNN_UNPREDICTABLE(batch != 0) { const __m128i vh = _mm_loadu_si128((const __m128i*) i); const __m128i vsign = _mm_and_si128(vh, vsign_mask); const __m128i vnonsign = _mm_xor_si128(vh, vsign); const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13); const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset); const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale)); const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias)); const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff); __m128i vf = 
_mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask))); if (batch & (4 * sizeof(uint16_t))) { _mm_storeu_ps(output, _mm_castsi128_ps(vf)); output += 4; vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign), _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask))); } if (batch & (2 * sizeof(uint16_t))) { _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf)); output += 2; vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf))); } if (batch & (1 * sizeof(uint16_t))) { _mm_store_ss(output, _mm_castsi128_ps(vf)); } } }
4,463
41.514286
134
c
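Note: a hedged usage sketch for the converters in this family. batch is measured in bytes of fp16 input, and params must be initialized before the call; the init helper named below (xnn_init_f16_f32_cvt_sse_int16_params) and its header are assumptions about this source tree and have moved between XNNPACK versions:

#include <stdint.h>
#include <xnnpack/vcvt.h>
#include <xnnpack/microparams-init.h>  // assumed header; older trees use params-init.h

// Convert 40 half-precision values to float (a sketch, not canonical usage).
void convert_40_halves(const uint16_t src[40], float dst[40]) {
  union xnn_f16_f32_cvt_params params;
  xnn_init_f16_f32_cvt_sse_int16_params(&params);  // assumed init helper
  // Note: batch counts BYTES of input, not elements.
  xnn_f16_f32_vcvt_ukernel__avx_int16_x8(40 * sizeof(uint16_t), src, dst, &params);
}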
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-avx512skx-x16.c
// Auto-generated file. Do not edit! // Template: src/f16-f32-vcvt/avx512skx.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/vcvt.h> void xnn_f16_f32_vcvt_ukernel__avx512skx_x16( size_t batch, const void* input, float* output, const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const uint16_t* i = (const uint16_t*) input; for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) { const __m512 vacc = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*) i)); i += 16; _mm512_storeu_ps(output, vacc); output += 16; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(uint16_t)); assert(batch <= 15 * sizeof(uint16_t)); // Prepare mask for valid 32-bit elements (depends on batch). batch >>= XNN_LOG2_SIZEOF_HALF; const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1))); const __m512 vacc = _mm512_cvtph_ps(_mm256_maskz_loadu_epi16(vmask, i)); _mm512_mask_storeu_ps(output, vmask, vacc); } }
1,458
27.607843
105
c
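Note: unlike the XNN_OOB_READS variants, the AVX512 tail never reads past the buffer: it builds a k-mask with one bit per remaining element and uses masked load/store. The idiom in isolation (a sketch; compile with AVX512F/BW/VL):

#include <immintrin.h>
#include <stdint.h>

// Convert the final n (1..15) half-floats without touching bytes past them.
static void cvt_tail_avx512(const uint16_t* i, float* o, unsigned n) {
  const __mmask16 vmask = _cvtu32_mask16((UINT32_C(1) << n) - UINT32_C(1));
  const __m512 vacc = _mm512_cvtph_ps(_mm256_maskz_loadu_epi16(vmask, i));
  _mm512_mask_storeu_ps(o, vmask, vacc);
}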
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-avx512skx-x32.c
// Auto-generated file. Do not edit! // Template: src/f16-f32-vcvt/avx512skx.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/vcvt.h> void xnn_f16_f32_vcvt_ukernel__avx512skx_x32( size_t batch, const void* input, float* output, const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const uint16_t* i = (const uint16_t*) input; for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) { const __m512 vacc0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*) i)); const __m512 vacc1 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*) (i + 16))); i += 32; _mm512_storeu_ps(output, vacc0); _mm512_storeu_ps(output + 16, vacc1); output += 32; } for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) { const __m512 vacc = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*) i)); i += 16; _mm512_storeu_ps(output, vacc); output += 16; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(uint16_t)); assert(batch <= 15 * sizeof(uint16_t)); // Prepare mask for valid 32-bit elements (depends on batch). batch >>= XNN_LOG2_SIZEOF_HALF; const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1))); const __m512 vacc = _mm512_cvtph_ps(_mm256_maskz_loadu_epi16(vmask, i)); _mm512_mask_storeu_ps(output, vmask, vacc); } }
1,819
29.333333
105
c
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-f16c-x16.c
// Auto-generated file. Do not edit! // Template: src/f16-f32-vcvt/f16c.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/vcvt.h> void xnn_f16_f32_vcvt_ukernel__f16c_x16( size_t batch, const void* input, float* output, const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const uint16_t* i = (const uint16_t*) input; for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) { const __m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); const __m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); i += 16; _mm256_storeu_ps(output, vacc0); _mm256_storeu_ps(output + 8, vacc1); output += 16; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; _mm256_storeu_ps(output, vacc); output += 8; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(uint16_t)); assert(batch <= 7 * sizeof(uint16_t)); const __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m128 vacc_lo = _mm256_castps256_ps128(vacc); if (batch & (4 * sizeof(uint16_t))) { _mm_storeu_ps(output, vacc_lo); vacc_lo = _mm256_extractf128_ps(vacc, 1); output += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storel_pi((__m64*) output, vacc_lo); vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo); output += 2; } if (batch & (1 * sizeof(uint16_t))) { _mm_store_ss(output, vacc_lo); } } }
1,961
28.283582
90
c
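Note: here the conversion itself is a single F16C instruction (vcvtph2ps), and the XNN_OOB_READS annotation flags that the tail still loads a full 16-byte vector before storing only the valid lanes. The instruction pair in isolation, round-tripping eight floats (a sketch; compile with -mf16c):

#include <immintrin.h>

// 8 floats -> 8 half-floats -> 8 floats with F16C.
// _MM_FROUND_TO_NEAREST_INT selects IEEE round-to-nearest-even.
static void f16c_round_trip(const float in[8], float out[8]) {
  const __m128i vh = _mm256_cvtps_ph(_mm256_loadu_ps(in), _MM_FROUND_TO_NEAREST_INT);
  _mm256_storeu_ps(out, _mm256_cvtph_ps(vh));
}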
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-f16c-x8.c
// Auto-generated file. Do not edit! // Template: src/f16-f32-vcvt/f16c.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/vcvt.h> void xnn_f16_f32_vcvt_ukernel__f16c_x8( size_t batch, const void* input, float* output, const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const uint16_t* i = (const uint16_t*) input; for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; _mm256_storeu_ps(output, vacc); output += 8; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(uint16_t)); assert(batch <= 7 * sizeof(uint16_t)); const __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m128 vacc_lo = _mm256_castps256_ps128(vacc); if (batch & (4 * sizeof(uint16_t))) { _mm_storeu_ps(output, vacc_lo); vacc_lo = _mm256_extractf128_ps(vacc, 1); output += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storel_pi((__m64*) output, vacc_lo); vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo); output += 2; } if (batch & (1 * sizeof(uint16_t))) { _mm_store_ss(output, vacc_lo); } } }
1,607
26.724138
90
c
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-neonfp16-x16.c
// Auto-generated file. Do not edit! // Template: src/f16-f32-vcvt/neonfp16.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/vcvt.h> void xnn_f16_f32_vcvt_ukernel__neonfp16_x16( size_t batch, const void* input, float* output, const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const uint16_t* i = (const uint16_t*) input; for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) { const float16x8_t vh0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vh1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float32x4_t vf0 = vcvt_f32_f16(vget_low_f16(vh0)); const float32x4_t vf1 = vcvt_f32_f16(vget_high_f16(vh0)); const float32x4_t vf2 = vcvt_f32_f16(vget_low_f16(vh1)); const float32x4_t vf3 = vcvt_f32_f16(vget_high_f16(vh1)); vst1q_f32(output, vf0); output += 4; vst1q_f32(output, vf1); output += 4; vst1q_f32(output, vf2); output += 4; vst1q_f32(output, vf3); output += 4; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const float16x8_t vh = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float32x4_t vf_lo = vcvt_f32_f16(vget_low_f16(vh)); const float32x4_t vf_hi = vcvt_f32_f16(vget_high_f16(vh)); vst1q_f32(output, vf_lo); output += 4; vst1q_f32(output, vf_hi); output += 4; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(uint16_t)); assert(batch <= 7 * sizeof(uint16_t)); const float16x8_t vh = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float32x4_t vf = vcvt_f32_f16(vget_low_f16(vh)); if (batch & (4 * sizeof(uint16_t))) { vst1q_f32(output, vf); output += 4; vf = vcvt_f32_f16(vget_high_f16(vh)); } float32x2_t vf_lo = vget_low_f32(vf); if (batch & (2 * sizeof(uint16_t))) { vst1_f32(output, vf_lo); output += 2; vf_lo = vget_high_f32(vf); } if (batch & (1 * sizeof(uint16_t))) { vst1_lane_f32(output, vf_lo, 0); } } }
2,376
31.561644
90
c
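Note: on NEON the widening is likewise a single instruction per four elements (vcvt_f32_f16); the x16 body above is just four such conversions fed from two q-register loads. A minimal standalone equivalent (a sketch; AArch32 needs -mfpu=neon-fp16, AArch64 has the conversion in its baseline):

#include <arm_neon.h>
#include <stdint.h>

// Widen four fp16 values (stored as uint16_t) to four floats.
static void cvt4_neonfp16(const uint16_t* i, float* o) {
  const float16x4_t vh = vreinterpret_f16_u16(vld1_u16(i));
  vst1q_f32(o, vcvt_f32_f16(vh));
}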
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-neonfp16-x8.c
// Auto-generated file. Do not edit! // Template: src/f16-f32-vcvt/neonfp16.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/vcvt.h> void xnn_f16_f32_vcvt_ukernel__neonfp16_x8( size_t batch, const void* input, float* output, const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const uint16_t* i = (const uint16_t*) input; for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const float16x8_t vh = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float32x4_t vf_lo = vcvt_f32_f16(vget_low_f16(vh)); const float32x4_t vf_hi = vcvt_f32_f16(vget_high_f16(vh)); vst1q_f32(output, vf_lo); output += 4; vst1q_f32(output, vf_hi); output += 4; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(uint16_t)); assert(batch <= 7 * sizeof(uint16_t)); const float16x8_t vh = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float32x4_t vf = vcvt_f32_f16(vget_low_f16(vh)); if (batch & (4 * sizeof(uint16_t))) { vst1q_f32(output, vf); output += 4; vf = vcvt_f32_f16(vget_high_f16(vh)); } float32x2_t vf_lo = vget_low_f32(vf); if (batch & (2 * sizeof(uint16_t))) { vst1_f32(output, vf_lo); output += 2; vf_lo = vget_high_f32(vf); } if (batch & (1 * sizeof(uint16_t))) { vst1_lane_f32(output, vf_lo, 0); } } }
1,738
28.474576
90
c
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-scalar-x1.c
// Auto-generated file. Do not edit! // Template: src/f16-f32-vcvt/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vcvt.h> void xnn_f16_f32_vcvt_ukernel__scalar_x1( size_t batch, const void* input, float* output, const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const uint32_t vsign_mask = params->scalar.sign_mask; const uint32_t vexp_offset = params->scalar.exp_offset; const float vexp_scale = params->scalar.exp_scale; const uint32_t vmagic_mask = params->scalar.magic_mask; const float vmagic_bias = params->scalar.magic_bias; const uint32_t vdenorm_cutoff = params->scalar.denorm_cutoff; const uint16_t* i = (const uint16_t*) input; uint32_t* o = (uint32_t*) output; do { const uint16_t vh = *i++; const uint32_t vw = (uint32_t) vh << 16; const uint32_t vsign = vw & vsign_mask; const uint32_t v2w = vw + vw; const uint32_t vnorm = float_as_uint32(uint32_as_float((v2w >> 4) + vexp_offset) * vexp_scale); const uint32_t vdenorm = float_as_uint32(uint32_as_float((v2w >> 17) | vmagic_mask) - vmagic_bias); const uint32_t vf = vsign | (XNN_UNPREDICTABLE(v2w < vdenorm_cutoff) ? vdenorm : vnorm); *o++ = vf; batch -= sizeof(uint16_t); } while (batch != 0); }
1,636
30.480769
103
c
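Note: the constants in params->scalar are pinned down by the arithmetic above (sign_mask 0x80000000, exp_offset 0x70000000, exp_scale 0x1.0p-112f, magic_mask 0x3F000000, magic_bias 0.5f, denorm_cutoff 0x08000000). A worked self-check of both paths on concrete inputs, reusing those inferred values (a sketch):

#include <assert.h>
#include <stdint.h>
#include <string.h>

static float u32f(uint32_t w) { float f; memcpy(&f, &w, sizeof f); return f; }
static uint32_t fu32(float f) { uint32_t w; memcpy(&w, &f, sizeof w); return w; }

// Same dataflow as the kernel above, with the inferred constants inlined.
static uint32_t cvt1(uint16_t vh) {
  const uint32_t vw = (uint32_t) vh << 16;
  const uint32_t vsign = vw & UINT32_C(0x80000000);
  const uint32_t v2w = vw + vw;  // doubles the payload, shifting the sign bit out
  const uint32_t vnorm =
      fu32(u32f((v2w >> 4) + UINT32_C(0x70000000)) * 0x1.0p-112f);
  const uint32_t vdenorm =
      fu32(u32f((v2w >> 17) | UINT32_C(0x3F000000)) - 0.5f);
  return vsign | (v2w < UINT32_C(0x08000000) ? vdenorm : vnorm);
}

int main(void) {
  assert(cvt1(0x3C00) == fu32(1.0f));        // fp16 1.0: normal path
  assert(cvt1(0x0001) == fu32(0x1.0p-24f));  // smallest subnormal: denormal path
  assert(cvt1(0xC000) == fu32(-2.0f));       // sign is OR'ed back at the end
  return 0;
}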
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-scalar-x2.c
// Auto-generated file. Do not edit! // Template: src/f16-f32-vcvt/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vcvt.h> void xnn_f16_f32_vcvt_ukernel__scalar_x2( size_t batch, const void* input, float* output, const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const uint32_t vsign_mask = params->scalar.sign_mask; const uint32_t vexp_offset = params->scalar.exp_offset; const float vexp_scale = params->scalar.exp_scale; const uint32_t vmagic_mask = params->scalar.magic_mask; const float vmagic_bias = params->scalar.magic_bias; const uint32_t vdenorm_cutoff = params->scalar.denorm_cutoff; const uint16_t* i = (const uint16_t*) input; uint32_t* o = (uint32_t*) output; for (; batch >= 2 * sizeof(uint16_t); batch -= 2 * sizeof(uint16_t)) { const uint16_t vh0 = i[0]; const uint16_t vh1 = i[1]; i += 2; const uint32_t vw0 = (uint32_t) vh0 << 16; const uint32_t vw1 = (uint32_t) vh1 << 16; const uint32_t vsign0 = vw0 & vsign_mask; const uint32_t vsign1 = vw1 & vsign_mask; const uint32_t v2w0 = vw0 + vw0; const uint32_t v2w1 = vw1 + vw1; const uint32_t vnorm0 = float_as_uint32(uint32_as_float((v2w0 >> 4) + vexp_offset) * vexp_scale); const uint32_t vnorm1 = float_as_uint32(uint32_as_float((v2w1 >> 4) + vexp_offset) * vexp_scale); const uint32_t vdenorm0 = float_as_uint32(uint32_as_float((v2w0 >> 17) | vmagic_mask) - vmagic_bias); const uint32_t vdenorm1 = float_as_uint32(uint32_as_float((v2w1 >> 17) | vmagic_mask) - vmagic_bias); const uint32_t vf0 = vsign0 | (XNN_UNPREDICTABLE(v2w0 < vdenorm_cutoff) ? vdenorm0 : vnorm0); const uint32_t vf1 = vsign1 | (XNN_UNPREDICTABLE(v2w1 < vdenorm_cutoff) ? vdenorm1 : vnorm1); o[0] = vf0; o[1] = vf1; o += 2; } if XNN_UNLIKELY(batch != 0) { const uint16_t vh = *i; const uint32_t vw = (uint32_t) vh << 16; const uint32_t vsign = vw & vsign_mask; const uint32_t v2w = vw + vw; const uint32_t vnorm = float_as_uint32(uint32_as_float((v2w >> 4) + vexp_offset) * vexp_scale); const uint32_t vdenorm = float_as_uint32(uint32_as_float((v2w >> 17) | vmagic_mask) - vmagic_bias); const uint32_t vf = vsign | (XNN_UNPREDICTABLE(v2w < vdenorm_cutoff) ? vdenorm : vnorm); *o = vf; } }
2,679
33.805195
105
c
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-scalar-x3.c
// Auto-generated file. Do not edit! // Template: src/f16-f32-vcvt/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vcvt.h> void xnn_f16_f32_vcvt_ukernel__scalar_x3( size_t batch, const void* input, float* output, const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const uint32_t vsign_mask = params->scalar.sign_mask; const uint32_t vexp_offset = params->scalar.exp_offset; const float vexp_scale = params->scalar.exp_scale; const uint32_t vmagic_mask = params->scalar.magic_mask; const float vmagic_bias = params->scalar.magic_bias; const uint32_t vdenorm_cutoff = params->scalar.denorm_cutoff; const uint16_t* i = (const uint16_t*) input; uint32_t* o = (uint32_t*) output; for (; batch >= 3 * sizeof(uint16_t); batch -= 3 * sizeof(uint16_t)) { const uint16_t vh0 = i[0]; const uint16_t vh1 = i[1]; const uint16_t vh2 = i[2]; i += 3; const uint32_t vw0 = (uint32_t) vh0 << 16; const uint32_t vw1 = (uint32_t) vh1 << 16; const uint32_t vw2 = (uint32_t) vh2 << 16; const uint32_t vsign0 = vw0 & vsign_mask; const uint32_t vsign1 = vw1 & vsign_mask; const uint32_t vsign2 = vw2 & vsign_mask; const uint32_t v2w0 = vw0 + vw0; const uint32_t v2w1 = vw1 + vw1; const uint32_t v2w2 = vw2 + vw2; const uint32_t vnorm0 = float_as_uint32(uint32_as_float((v2w0 >> 4) + vexp_offset) * vexp_scale); const uint32_t vnorm1 = float_as_uint32(uint32_as_float((v2w1 >> 4) + vexp_offset) * vexp_scale); const uint32_t vnorm2 = float_as_uint32(uint32_as_float((v2w2 >> 4) + vexp_offset) * vexp_scale); const uint32_t vdenorm0 = float_as_uint32(uint32_as_float((v2w0 >> 17) | vmagic_mask) - vmagic_bias); const uint32_t vdenorm1 = float_as_uint32(uint32_as_float((v2w1 >> 17) | vmagic_mask) - vmagic_bias); const uint32_t vdenorm2 = float_as_uint32(uint32_as_float((v2w2 >> 17) | vmagic_mask) - vmagic_bias); const uint32_t vf0 = vsign0 | (XNN_UNPREDICTABLE(v2w0 < vdenorm_cutoff) ? vdenorm0 : vnorm0); const uint32_t vf1 = vsign1 | (XNN_UNPREDICTABLE(v2w1 < vdenorm_cutoff) ? vdenorm1 : vnorm1); const uint32_t vf2 = vsign2 | (XNN_UNPREDICTABLE(v2w2 < vdenorm_cutoff) ? vdenorm2 : vnorm2); o[0] = vf0; o[1] = vf1; o[2] = vf2; o += 3; } if XNN_UNLIKELY(batch != 0) { do { const uint16_t vh = *i++; const uint32_t vw = (uint32_t) vh << 16; const uint32_t vsign = vw & vsign_mask; const uint32_t v2w = vw + vw; const uint32_t vnorm = float_as_uint32(uint32_as_float((v2w >> 4) + vexp_offset) * vexp_scale); const uint32_t vdenorm = float_as_uint32(uint32_as_float((v2w >> 17) | vmagic_mask) - vmagic_bias); const uint32_t vf = vsign | (XNN_UNPREDICTABLE(v2w < vdenorm_cutoff) ? vdenorm : vnorm); *o++ = vf; batch -= sizeof(uint16_t); } while (batch != 0); } }
3,251
35.539326
105
c
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-scalar-x4.c
// Auto-generated file. Do not edit! // Template: src/f16-f32-vcvt/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vcvt.h> void xnn_f16_f32_vcvt_ukernel__scalar_x4( size_t batch, const void* input, float* output, const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const uint32_t vsign_mask = params->scalar.sign_mask; const uint32_t vexp_offset = params->scalar.exp_offset; const float vexp_scale = params->scalar.exp_scale; const uint32_t vmagic_mask = params->scalar.magic_mask; const float vmagic_bias = params->scalar.magic_bias; const uint32_t vdenorm_cutoff = params->scalar.denorm_cutoff; const uint16_t* i = (const uint16_t*) input; uint32_t* o = (uint32_t*) output; for (; batch >= 4 * sizeof(uint16_t); batch -= 4 * sizeof(uint16_t)) { const uint16_t vh0 = i[0]; const uint16_t vh1 = i[1]; const uint16_t vh2 = i[2]; const uint16_t vh3 = i[3]; i += 4; const uint32_t vw0 = (uint32_t) vh0 << 16; const uint32_t vw1 = (uint32_t) vh1 << 16; const uint32_t vw2 = (uint32_t) vh2 << 16; const uint32_t vw3 = (uint32_t) vh3 << 16; const uint32_t vsign0 = vw0 & vsign_mask; const uint32_t vsign1 = vw1 & vsign_mask; const uint32_t vsign2 = vw2 & vsign_mask; const uint32_t vsign3 = vw3 & vsign_mask; const uint32_t v2w0 = vw0 + vw0; const uint32_t v2w1 = vw1 + vw1; const uint32_t v2w2 = vw2 + vw2; const uint32_t v2w3 = vw3 + vw3; const uint32_t vnorm0 = float_as_uint32(uint32_as_float((v2w0 >> 4) + vexp_offset) * vexp_scale); const uint32_t vnorm1 = float_as_uint32(uint32_as_float((v2w1 >> 4) + vexp_offset) * vexp_scale); const uint32_t vnorm2 = float_as_uint32(uint32_as_float((v2w2 >> 4) + vexp_offset) * vexp_scale); const uint32_t vnorm3 = float_as_uint32(uint32_as_float((v2w3 >> 4) + vexp_offset) * vexp_scale); const uint32_t vdenorm0 = float_as_uint32(uint32_as_float((v2w0 >> 17) | vmagic_mask) - vmagic_bias); const uint32_t vdenorm1 = float_as_uint32(uint32_as_float((v2w1 >> 17) | vmagic_mask) - vmagic_bias); const uint32_t vdenorm2 = float_as_uint32(uint32_as_float((v2w2 >> 17) | vmagic_mask) - vmagic_bias); const uint32_t vdenorm3 = float_as_uint32(uint32_as_float((v2w3 >> 17) | vmagic_mask) - vmagic_bias); const uint32_t vf0 = vsign0 | (XNN_UNPREDICTABLE(v2w0 < vdenorm_cutoff) ? vdenorm0 : vnorm0); const uint32_t vf1 = vsign1 | (XNN_UNPREDICTABLE(v2w1 < vdenorm_cutoff) ? vdenorm1 : vnorm1); const uint32_t vf2 = vsign2 | (XNN_UNPREDICTABLE(v2w2 < vdenorm_cutoff) ? vdenorm2 : vnorm2); const uint32_t vf3 = vsign3 | (XNN_UNPREDICTABLE(v2w3 < vdenorm_cutoff) ? vdenorm3 : vnorm3); o[0] = vf0; o[1] = vf1; o[2] = vf2; o[3] = vf3; o += 4; } if XNN_UNLIKELY(batch != 0) { do { const uint16_t vh = *i++; const uint32_t vw = (uint32_t) vh << 16; const uint32_t vsign = vw & vsign_mask; const uint32_t v2w = vw + vw; const uint32_t vnorm = float_as_uint32(uint32_as_float((v2w >> 4) + vexp_offset) * vexp_scale); const uint32_t vdenorm = float_as_uint32(uint32_as_float((v2w >> 17) | vmagic_mask) - vmagic_bias); const uint32_t vf = vsign | (XNN_UNPREDICTABLE(v2w < vdenorm_cutoff) ? vdenorm : vnorm); *o++ = vf; batch -= sizeof(uint16_t); } while (batch != 0); } }
3,734
37.505155
105
c
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-sse2-int16-x16.c
// Auto-generated file. Do not edit!
//   Template: src/f16-f32-vcvt/sse-int16.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f16_f32_vcvt_ukernel__sse2_int16_x16(
    size_t batch,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask);
  const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset);
  const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale);
  const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask);
  const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias);
  const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
  for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
    const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
    const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
    i += 16;

    const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
    const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);

    const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
    const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);

    const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
    const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
    const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
    const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);

    const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
    const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
    const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
    const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));

    const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));

    const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
    const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);

    const __m128i vxmask0 = _mm_unpacklo_epi16(vmask0, vmask0);
    const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
      _mm_or_si128(_mm_and_si128(vxmask0, vnorm0), _mm_andnot_si128(vxmask0, vdenorm0)));
    const __m128i vxmask1 = _mm_unpackhi_epi16(vmask0, vmask0);
    const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
      _mm_or_si128(_mm_and_si128(vxmask1, vnorm1), _mm_andnot_si128(vxmask1, vdenorm1)));
    const __m128i vxmask2 = _mm_unpacklo_epi16(vmask1, vmask1);
    const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
      _mm_or_si128(_mm_and_si128(vxmask2, vnorm2), _mm_andnot_si128(vxmask2, vdenorm2)));
    const __m128i vxmask3 = _mm_unpackhi_epi16(vmask1, vmask1);
    const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
      _mm_or_si128(_mm_and_si128(vxmask3, vnorm3), _mm_andnot_si128(vxmask3, vdenorm3)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
    _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
    _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
    output += 16;
  }
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);
    i += 8;

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
    const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));

    const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
    const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
    output += 8;
  }
  if XNN_UNPREDICTABLE(batch != 0) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
    __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));

    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storeu_ps(output, _mm_castsi128_ps(vf));
      output += 4;

      const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
      vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
        _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
    }
    if (batch & (2 * sizeof(uint16_t))) {
      _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
      output += 2;

      vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
    }
    if (batch & (1 * sizeof(uint16_t))) {
      _mm_store_ss(output, _mm_castsi128_ps(vf));
    }
  }
}
file_length: 7,954 | avg_line_length: 49.66879 | max_line_length: 134 | extension_type: c
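The unrolled SSE2 kernels in this group all implement the same per-element bit trick, which is easier to audit in scalar form. Below is a minimal, hypothetical scalar sketch (not part of XNNPACK), assuming little-endian IEEE-754 floats and the standard constants this trick uses for the sse_int16 params: sign mask 0x8000, per-lane exponent offset 0x7000, exponent scale 0x1.0p-112f, magic mask 0x3F00, magic bias 0.5f, and denormal cutoff 0x03FF.

#include <stdint.h>
#include <string.h>

// Hypothetical scalar reference for one fp16 -> fp32 conversion.
static float fp16_to_fp32_scalar(uint16_t h) {
  const uint32_t sign = (uint32_t) (h & UINT16_C(0x8000)) << 16;  // sign -> bit 31
  const uint32_t nonsign = h & UINT16_C(0x7FFF);                  // exponent + mantissa

  // Normal/Inf/NaN path: the kernel's 16-bit shifts plus the 0x7000-per-lane
  // offset combine, after the 16x16 unpack, into (nonsign << 13) + 0x70000000;
  // multiplying by 2**-112 then rebiases the exponent from 15 to 127.
  uint32_t norm_bits = (nonsign << 13) + UINT32_C(0x70000000);
  float norm;
  memcpy(&norm, &norm_bits, sizeof(norm));
  norm *= 0x1.0p-112f;

  // Denormal path: splicing the raw mantissa under the 0x3F00 magic mask
  // builds 0.5f + mantissa * 2**-24; subtracting the 0.5f magic bias leaves
  // exactly the denormal's value.
  uint32_t denorm_bits = UINT32_C(0x3F000000) | nonsign;
  float denorm;
  memcpy(&denorm, &denorm_bits, sizeof(denorm));
  denorm -= 0.5f;

  // nonsign > 0x03FF means a nonzero exponent field, i.e. a normal number.
  const float abs_value = nonsign > UINT16_C(0x03FF) ? norm : denorm;

  uint32_t bits;
  memcpy(&bits, &abs_value, sizeof(bits));
  bits |= sign;  // reattach the sign bit
  float result;
  memcpy(&result, &bits, sizeof(result));
  return result;
}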
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-sse2-int16-x24.c
// Auto-generated file. Do not edit!
//   Template: src/f16-f32-vcvt/sse-int16.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f16_f32_vcvt_ukernel__sse2_int16_x24(
    size_t batch,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask);
  const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset);
  const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale);
  const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask);
  const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias);
  const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
  for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) {
    const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
    const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
    const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
    i += 24;

    const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
    const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);
    const __m128i vsign2 = _mm_and_si128(vh2, vsign_mask);

    const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
    const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);
    const __m128i vnonsign2 = _mm_xor_si128(vh2, vsign2);

    const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
    const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
    const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
    const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);
    const __m128i vprenorm4 = _mm_slli_epi16(vnonsign2, 13);
    const __m128i vprenorm5 = _mm_add_epi16(_mm_srli_epi16(vnonsign2, 3), vexp_offset);

    const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
    const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
    const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
    const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));
    const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm4, vprenorm5)), vexp_scale));
    const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm4, vprenorm5)), vexp_scale));

    const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign2, vmagic_mask)), vmagic_bias));

    const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
    const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);
    const __m128i vmask2 = _mm_cmpgt_epi16(vnonsign2, vdenorm_cutoff);

    const __m128i vxmask0 = _mm_unpacklo_epi16(vmask0, vmask0);
    const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
      _mm_or_si128(_mm_and_si128(vxmask0, vnorm0), _mm_andnot_si128(vxmask0, vdenorm0)));
    const __m128i vxmask1 = _mm_unpackhi_epi16(vmask0, vmask0);
    const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
      _mm_or_si128(_mm_and_si128(vxmask1, vnorm1), _mm_andnot_si128(vxmask1, vdenorm1)));
    const __m128i vxmask2 = _mm_unpacklo_epi16(vmask1, vmask1);
    const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
      _mm_or_si128(_mm_and_si128(vxmask2, vnorm2), _mm_andnot_si128(vxmask2, vdenorm2)));
    const __m128i vxmask3 = _mm_unpackhi_epi16(vmask1, vmask1);
    const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
      _mm_or_si128(_mm_and_si128(vxmask3, vnorm3), _mm_andnot_si128(vxmask3, vdenorm3)));
    const __m128i vxmask4 = _mm_unpacklo_epi16(vmask2, vmask2);
    const __m128i vf4 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign2),
      _mm_or_si128(_mm_and_si128(vxmask4, vnorm4), _mm_andnot_si128(vxmask4, vdenorm4)));
    const __m128i vxmask5 = _mm_unpackhi_epi16(vmask2, vmask2);
    const __m128i vf5 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign2),
      _mm_or_si128(_mm_and_si128(vxmask5, vnorm5), _mm_andnot_si128(vxmask5, vdenorm5)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
    _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
    _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
    _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
    _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
    output += 24;
  }
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);
    i += 8;

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
    const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));

    const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
    const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
    output += 8;
  }
  if XNN_UNPREDICTABLE(batch != 0) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
    __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));

    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storeu_ps(output, _mm_castsi128_ps(vf));
      output += 4;

      const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
      vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
        _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
    }
    if (batch & (2 * sizeof(uint16_t))) {
      _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
      output += 2;

      vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
    }
    if (batch & (1 * sizeof(uint16_t))) {
      _mm_store_ss(output, _mm_castsi128_ps(vf));
    }
  }
}
file_length: 9,475 | avg_line_length: 53.148571 | max_line_length: 134 | extension_type: c
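One non-obvious step in these SSE2 variants is how the 16-bit compare result from _mm_cmpgt_epi16() becomes a per-float select. A hedged sketch of the two idioms involved, with hypothetical helper names (the kernels inline all of this):

#include <emmintrin.h>

// _mm_cmpgt_epi16() produces 0x0000/0xFFFF per 16-bit lane; pairing each lane
// with itself via an unpack yields the 0x00000000/0xFFFFFFFF per-32-bit-lane
// masks that the float lanes need.
static inline __m128i widen_mask_lo(__m128i mask16) {
  return _mm_unpacklo_epi16(mask16, mask16);  // covers output lanes 0..3
}
static inline __m128i widen_mask_hi(__m128i mask16) {
  return _mm_unpackhi_epi16(mask16, mask16);  // covers output lanes 4..7
}

// Branchless select, per bit: mask ? a : b. This is the AND/ANDNOT/OR triple
// that appears in every vf computation in these kernels.
static inline __m128i select_si128(__m128i mask, __m128i a, __m128i b) {
  return _mm_or_si128(_mm_and_si128(mask, a), _mm_andnot_si128(mask, b));
}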
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-sse2-int16-x32.c
// Auto-generated file. Do not edit!
//   Template: src/f16-f32-vcvt/sse-int16.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f16_f32_vcvt_ukernel__sse2_int16_x32(
    size_t batch,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask);
  const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset);
  const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale);
  const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask);
  const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias);
  const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
  for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
    const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
    const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
    const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
    const __m128i vh3 = _mm_loadu_si128((const __m128i*) (i + 24));
    i += 32;

    const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
    const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);
    const __m128i vsign2 = _mm_and_si128(vh2, vsign_mask);
    const __m128i vsign3 = _mm_and_si128(vh3, vsign_mask);

    const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
    const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);
    const __m128i vnonsign2 = _mm_xor_si128(vh2, vsign2);
    const __m128i vnonsign3 = _mm_xor_si128(vh3, vsign3);

    const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
    const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
    const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
    const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);
    const __m128i vprenorm4 = _mm_slli_epi16(vnonsign2, 13);
    const __m128i vprenorm5 = _mm_add_epi16(_mm_srli_epi16(vnonsign2, 3), vexp_offset);
    const __m128i vprenorm6 = _mm_slli_epi16(vnonsign3, 13);
    const __m128i vprenorm7 = _mm_add_epi16(_mm_srli_epi16(vnonsign3, 3), vexp_offset);

    const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
    const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
    const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
    const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));
    const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm4, vprenorm5)), vexp_scale));
    const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm4, vprenorm5)), vexp_scale));
    const __m128i vnorm6 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm6, vprenorm7)), vexp_scale));
    const __m128i vnorm7 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm6, vprenorm7)), vexp_scale));

    const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm6 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign3, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm7 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign3, vmagic_mask)), vmagic_bias));

    const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
    const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);
    const __m128i vmask2 = _mm_cmpgt_epi16(vnonsign2, vdenorm_cutoff);
    const __m128i vmask3 = _mm_cmpgt_epi16(vnonsign3, vdenorm_cutoff);

    const __m128i vxmask0 = _mm_unpacklo_epi16(vmask0, vmask0);
    const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
      _mm_or_si128(_mm_and_si128(vxmask0, vnorm0), _mm_andnot_si128(vxmask0, vdenorm0)));
    const __m128i vxmask1 = _mm_unpackhi_epi16(vmask0, vmask0);
    const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
      _mm_or_si128(_mm_and_si128(vxmask1, vnorm1), _mm_andnot_si128(vxmask1, vdenorm1)));
    const __m128i vxmask2 = _mm_unpacklo_epi16(vmask1, vmask1);
    const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
      _mm_or_si128(_mm_and_si128(vxmask2, vnorm2), _mm_andnot_si128(vxmask2, vdenorm2)));
    const __m128i vxmask3 = _mm_unpackhi_epi16(vmask1, vmask1);
    const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
      _mm_or_si128(_mm_and_si128(vxmask3, vnorm3), _mm_andnot_si128(vxmask3, vdenorm3)));
    const __m128i vxmask4 = _mm_unpacklo_epi16(vmask2, vmask2);
    const __m128i vf4 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign2),
      _mm_or_si128(_mm_and_si128(vxmask4, vnorm4), _mm_andnot_si128(vxmask4, vdenorm4)));
    const __m128i vxmask5 = _mm_unpackhi_epi16(vmask2, vmask2);
    const __m128i vf5 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign2),
      _mm_or_si128(_mm_and_si128(vxmask5, vnorm5), _mm_andnot_si128(vxmask5, vdenorm5)));
    const __m128i vxmask6 = _mm_unpacklo_epi16(vmask3, vmask3);
    const __m128i vf6 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign3),
      _mm_or_si128(_mm_and_si128(vxmask6, vnorm6), _mm_andnot_si128(vxmask6, vdenorm6)));
    const __m128i vxmask7 = _mm_unpackhi_epi16(vmask3, vmask3);
    const __m128i vf7 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign3),
      _mm_or_si128(_mm_and_si128(vxmask7, vnorm7), _mm_andnot_si128(vxmask7, vdenorm7)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
    _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
    _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
    _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
    _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
    _mm_storeu_ps(output + 24, _mm_castsi128_ps(vf6));
    _mm_storeu_ps(output + 28, _mm_castsi128_ps(vf7));
    output += 32;
  }
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);
    i += 8;

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
    const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));

    const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
    const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
    output += 8;
  }
  if XNN_UNPREDICTABLE(batch != 0) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
    __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));

    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storeu_ps(output, _mm_castsi128_ps(vf));
      output += 4;

      const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
      vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
        _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
    }
    if (batch & (2 * sizeof(uint16_t))) {
      _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
      output += 2;

      vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
    }
    if (batch & (1 * sizeof(uint16_t))) {
      _mm_store_ss(output, _mm_castsi128_ps(vf));
    }
  }
}
file_length: 10,996 | avg_line_length: 55.979275 | max_line_length: 134 | extension_type: c
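All of these kernels share the same remainder handling: 'batch' is a byte count, so its low bits directly encode how many of the final 1-7 elements remain, and the stores peel off 4, 2, then 1 floats. A standalone sketch of that pattern follows (hypothetical helper; in the kernels themselves the second half-vector is computed lazily inside the first branch):

#include <stddef.h>
#include <stdint.h>
#include <xmmintrin.h>

// Store up to 7 trailing floats from two half-results. 'batch' is the
// remaining input size in bytes (1..7 uint16_t elements' worth).
static void store_tail(float* output, size_t batch, __m128 vf_lo, __m128 vf_hi) {
  __m128 vf = vf_lo;
  if (batch & (4 * sizeof(uint16_t))) {
    _mm_storeu_ps(output, vf);           // 4 floats
    output += 4;
    vf = vf_hi;                          // continue with the upper half
  }
  if (batch & (2 * sizeof(uint16_t))) {
    _mm_storel_pi((__m64*) output, vf);  // low 2 floats
    output += 2;
    vf = _mm_movehl_ps(vf, vf);          // shift the high pair down
  }
  if (batch & (1 * sizeof(uint16_t))) {
    _mm_store_ss(output, vf);            // final float
  }
}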
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-sse2-int16-x8.c
// Auto-generated file. Do not edit!
//   Template: src/f16-f32-vcvt/sse-int16.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f16_f32_vcvt_ukernel__sse2_int16_x8(
    size_t batch,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask);
  const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset);
  const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale);
  const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask);
  const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias);
  const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);
    i += 8;

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
    const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));

    const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
    const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
    output += 8;
  }
  if XNN_UNPREDICTABLE(batch != 0) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
    __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));

    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storeu_ps(output, _mm_castsi128_ps(vf));
      output += 4;

      const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
      vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
        _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
    }
    if (batch & (2 * sizeof(uint16_t))) {
      _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
      output += 2;

      vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
    }
    if (batch & (1 * sizeof(uint16_t))) {
      _mm_store_ss(output, _mm_castsi128_ps(vf));
    }
  }
}
file_length: 4,808 | avg_line_length: 43.119266 | max_line_length: 134 | extension_type: c
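For context, a hedged call-site sketch for the x8 variant. The init-helper name below is an assumption (XNNPACK fills the params struct through a matching xnn_init_*_params routine before dispatch); note that 'batch' is measured in bytes of input, per the asserts in the kernel.

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/vcvt.h>

// Hypothetical call site: convert n fp16 values at 'src' into 'dst'.
void convert_f16_to_f32(const uint16_t* src, float* dst, size_t n) {
  union xnn_f16_f32_cvt_params params;
  xnn_init_f16_f32_cvt_sse_int16_params(&params);  // assumed init helper
  xnn_f16_f32_vcvt_ukernel__sse2_int16_x8(n * sizeof(uint16_t), src, dst, &params);
}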
XNNPACK
XNNPACK-master/src/f16-f32-vcvt/gen/f16-f32-vcvt-sse41-int16-x16.c
// Auto-generated file. Do not edit!
//   Template: src/f16-f32-vcvt/sse-int16.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f16_f32_vcvt_ukernel__sse41_int16_x16(
    size_t batch,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask);
  const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset);
  const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale);
  const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask);
  const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias);
  const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
  for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
    const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
    const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
    i += 16;

    const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
    const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);

    const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
    const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);

    const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
    const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
    const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
    const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);

    const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
    const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
    const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
    const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));

    const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));

    const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
    const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);

    const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
      _mm_blendv_epi8(vdenorm0, vnorm0, _mm_cvtepi16_epi32(vmask0)));
    const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
      _mm_blendv_epi8(vdenorm1, vnorm1, _mm_unpackhi_epi16(vmask0, vmask0)));
    const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
      _mm_blendv_epi8(vdenorm2, vnorm2, _mm_cvtepi16_epi32(vmask1)));
    const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
      _mm_blendv_epi8(vdenorm3, vnorm3, _mm_unpackhi_epi16(vmask1, vmask1)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
    _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
    _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
    output += 16;
  }
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);
    i += 8;

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));

    const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
      _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
    output += 8;
  }
  if XNN_UNPREDICTABLE(batch != 0) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));

    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storeu_ps(output, _mm_castsi128_ps(vf));
      output += 4;

      vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
        _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
    }
    if (batch & (2 * sizeof(uint16_t))) {
      _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
      output += 2;

      vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
    }
    if (batch & (1 * sizeof(uint16_t))) {
      _mm_store_ss(output, _mm_castsi128_ps(vf));
    }
  }
}
file_length: 7,291 | avg_line_length: 47.939597 | max_line_length: 134 | extension_type: c
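The SSE4.1 variant above differs from its SSE2 counterpart only in the select step: the AND/ANDNOT/OR triple becomes a byte blend, and _mm_cvtepi16_epi32() (sign extension) replaces the low-half unpack for widening the mask. A side-by-side sketch with hypothetical helper names:

#include <smmintrin.h>

// SSE2 form: per-bit select, mask ? a : b.
static inline __m128i select_sse2(__m128i mask, __m128i a, __m128i b) {
  return _mm_or_si128(_mm_and_si128(mask, a), _mm_andnot_si128(mask, b));
}

// SSE4.1 form: per-byte blend keyed on each mask byte's sign bit. With
// all-zeros/all-ones masks the two forms produce identical results.
static inline __m128i select_sse41(__m128i mask, __m128i a, __m128i b) {
  return _mm_blendv_epi8(b, a, mask);
}

// Widening the low four 16-bit mask lanes: sign extension yields the same
// 0x00000000/0xFFFFFFFF lanes as _mm_unpacklo_epi16(mask, mask).
static inline __m128i widen_mask_lo_sse41(__m128i mask16) {
  return _mm_cvtepi16_epi32(mask16);
}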