repo (stringlengths 1-152, ⌀) | file (stringlengths 14-221) | code (stringlengths 501-25k) | file_length (int64 501-25k) | avg_line_length (float64 20-99.5) | max_line_length (int64 21-134) | extension_type (stringclasses 2)
---|---|---|---|---|---|---
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr2recps-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2recps_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
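// Algorithm overview: this kernel evaluates sigmoid(x) = 1 / (1 + exp(-x)) per element.
// Working with z = |x|, it forms n = magic_bias + z * (-log2(e)) so that the low 11 bits of n
// index a 2048-entry table of 2**(-k/2048) while the remaining bits supply the exponent.
// exp(-z) is reconstructed by adding the exponent bits e (n << 12) to the table value l and
// applying a degree-1 polynomial correction in the residual t = z + n * ln(2). Then
// f = exp(-z) / (1 + exp(-z)) is computed with VRECPE plus two Newton-Raphson steps (VRECPS),
// flushed to zero where |x| exceeds the denormal cutoff, and 1 - f is selected for x >= 0.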
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 12);
const int32x4_t veKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 12);
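// NEON has no gather instruction: the low 11 bits of each n are masked out, read back as
// pairs of 64-bit lanes, and used to load table entries one at a time with
// vld1_dup_f32 / vld1_lane_f32 before being recombined into 128-bit vectors.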
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);
const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
float32x2_t vlKL = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxKL]);
float32x2_t vlMN = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxMN]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
vlKL = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxKL >> 32)], vlKL, 1);
vlMN = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxMN >> 32)], vlMN, 1);
const float32x4_t vlKLMN = vcombine_f32(vlKL, vlMN);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlKLMN), veKLMN));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc1);
const float32x4_t vpKLMN = vmulq_f32(vtKLMN, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vyKLMN = vfmaq_f32(vsKLMN, vsKLMN, vpKLMN);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(vyKLMN, vone);
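// Approximate r ~= 1/d with VRECPE, then refine with two Newton-Raphson steps: each
// VRECPS computes (2 - r * d), so r *= (2 - r * d) roughly doubles the number of
// correct bits per step.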
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
float32x4_t vrKLMN = vrecpeq_f32(vdKLMN);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
float32x4_t vfKLMN = vmulq_f32(vyKLMN, vrKLMN);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 15,837 | 51.9699 | 113 | c |
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr2recps-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2recps_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
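// Same lut2048 + degree-1 polynomial sigmoid approximation as the x24 variant above,
// specialized to a single 4-element vector per loop iteration.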
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 4,901 | 39.512397 | 101 | c |
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr2recps-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2recps_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
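// Same lut2048 + degree-1 polynomial sigmoid approximation as the variants above,
// unrolled for 8 elements (two 4-element vectors) per main-loop iteration.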
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 8,606 | 43.138462 | 113 | c |
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr1recps1fma-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr1recps1fma_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
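// Algorithm overview: as in the lut2048 kernels above, sigmoid(x) is evaluated via
// exp(-|x|) / (1 + exp(-|x|)), but here the range reduction uses a 64-entry table of
// 2**(-k/64) (low 6 bits of n select the entry, exponent bits are shifted by 17) together
// with a degree-2 polynomial in t. The reciprocal is refined with one VRECPS Newton-Raphson
// step followed by one FMA-based step (hence the "nr1recps1fma" suffix).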
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
// Use bits 0:6 of n, as an integer k, as the index for a table lookup of l := 2**(-k/64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 10,693 | 45.903509 | 113 | c |
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr1recps1fma-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr1recps1fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
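// Same lut64 + degree-2 polynomial sigmoid approximation as the x12 variant above,
// unrolled for 16 elements per main-loop iteration.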
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
// Use bits 0:6 of n, as an integer k, as the index for a table lookup of l := 2**(-k/64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtCDEF, vpCDEF, vtCDEF);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmsq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 12,546 | 48.203922 | 113 | c |
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr1recps1fma-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr1recps1fma_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
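// Same lut64 + degree-2 polynomial sigmoid approximation as the preceding variants,
// unrolled for 20 elements per main-loop iteration.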
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 17);
// Use bits 0:6 of n, as an integer k, as the index for a table lookup of l := 2**(-k/64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vfmsq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmsq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmsq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
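    // vr now approximates 1/vd: the initial VRECPE estimate was refined with one Newton-Raphson step using VRECPS
    // (r := r * (2 - r*d)) and a second step written with FMA (r := r + r * (1 - r*d)).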
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
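    // For |x| above the cutoff, exp(-|x|) is not representable as a normalized float, so f is flushed to zero here;
    // the sign-based selection below then produces 0 for such negative inputs and 1 for positive ones.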
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
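    // The selects above implement sigmoid(x) == 1 - sigmoid(-x): f held sigmoid(-|x|), which is kept for x < 0
    // and replaced by 1 - f otherwise.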
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 14,399 | 50.06383 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr1recps1fma-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr1recps1fma_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 17);
const int32x4_t veKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 17);
    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(n % 64).
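    // The scale s := 2**n is reconstructed further below: the vshlq_n_s32(..., 17) above moves the integer part of n
    // into the floating-point exponent position, and the vaddq_s32 on the vs vectors applies it to the bit pattern
    // of the looked-up value l.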
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);
const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
float32x2_t vlKL = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxKL]);
float32x2_t vlMN = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxMN]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
vlKL = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxKL >> 32)], vlKL, 1);
vlMN = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxMN >> 32)], vlMN, 1);
const float32x4_t vlKLMN = vcombine_f32(vlKL, vlMN);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlKLMN), veKLMN));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
float32x4_t vpKLMN = vmulq_f32(vtKLMN, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vfmsq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
vpKLMN = vfmsq_f32(vtKLMN, vpKLMN, vtKLMN);
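    // p := t - c2*t**2, so y := s - s*p = s*(1 - t + c2*t**2) ~= s*exp(-t), i.e. y ~= exp(-z).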
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmsq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmsq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vyKLMN = vfmsq_f32(vsKLMN, vsKLMN, vpKLMN);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(vyKLMN, vone);
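    // d := y + 1 ~= exp(-z) + 1 is the sigmoid denominator; its reciprocal is approximated next.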
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
float32x4_t vrKLMN = vrecpeq_f32(vdKLMN);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
vrKLMN = vfmaq_f32(vrKLMN, vrKLMN, vfmsq_f32(vone, vrKLMN, vdKLMN));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
float32x4_t vfKLMN = vmulq_f32(vyKLMN, vrKLMN);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 16,252 | 51.598706 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr1recps1fma-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr1recps1fma_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
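    // 1 to 3 elements remain: a full vector is still loaded (the kernel is marked XNN_OOB_READS because this may
    // read past the end of the input), and only the valid lanes are stored below.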
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 4,938 | 39.154472 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr1recps1fma-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr1recps1fma_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
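    // n is computed with the magic-bias trick: adding the large constant forces rounding of the scaled argument
    // into the low mantissa bits; the bias is subtracted again below before the residual t is formed.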
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(n % 64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 8,837 | 42.970149 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr2fma-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr2fma_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(n % 64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
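    // nr2fma variant: the initial VRECPE estimate of 1/d is refined with two Newton-Raphson steps, both written as
    // r := r + r * (1 - r*d) using FMA (no VRECPS), before being multiplied by y below.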
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 10,739 | 46.105263 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr2fma-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr2fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
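  // Main loop: 16 elements (four 128-bit vectors) per iteration; smaller remainders are handled by the loops below.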
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(n % 64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtCDEF, vpCDEF, vtCDEF);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmsq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
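    // Store the remaining 1-3 elements.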
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 12,604 | 48.431373 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr2fma-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr2fma_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
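    // The magic-bias trick rounds z * (-64/ln(2)) to an integer held in the low mantissa bits of n:
    // bits 0:5 index the table, the bits above feed the exponent adjustment.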
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 17);
    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(-k/64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
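    // Fuse each table value l with the exponent bits derived from n (integer add on the bit patterns) to form the scale s.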
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
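    // Approximate exp(-t) on the reduced argument t with the degree-2 polynomial 1 - t + c2*t*t,
    // so y := s * (1 - t + c2*t*t) ~= exp(-|x|).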
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vfmsq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmsq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmsq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
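    // d := 1 + exp(-|x|); two Newton-Raphson iterations refine the initial estimate of 1/d.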
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
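    // Inputs with |x| above the cutoff would make exp(-|x|) denormal; flush f to 0 there (the sign fix-up below restores 1 for positive x).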
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
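    // sigmoid(x) = f for x < 0 and 1 - f for x >= 0, since f approximates sigmoid(-|x|).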
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 14,469 | 50.312057 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr2fma-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr2fma_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 17);
const int32x4_t veKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 17);
    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(-k/64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);
const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
float32x2_t vlKL = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxKL]);
float32x2_t vlMN = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxMN]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
vlKL = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxKL >> 32)], vlKL, 1);
vlMN = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxMN >> 32)], vlMN, 1);
const float32x4_t vlKLMN = vcombine_f32(vlKL, vlMN);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlKLMN), veKLMN));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
float32x4_t vpKLMN = vmulq_f32(vtKLMN, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vfmsq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
vpKLMN = vfmsq_f32(vtKLMN, vpKLMN, vtKLMN);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmsq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmsq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vyKLMN = vfmsq_f32(vsKLMN, vsKLMN, vpKLMN);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(vyKLMN, vone);
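    // Refine the reciprocal of d = 1 + exp(-|x|) with two Newton-Raphson steps.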
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
float32x4_t vrKLMN = vrecpeq_f32(vdKLMN);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
vrKLMN = vfmaq_f32(vrKLMN, vrKLMN, vfmsq_f32(vone, vrKLMN, vdKLMN));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
vrKLMN = vfmaq_f32(vrKLMN, vrKLMN, vfmsq_f32(vone, vrKLMN, vdKLMN));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
float32x4_t vfKLMN = vmulq_f32(vyKLMN, vrKLMN);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
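    // Select f for negative inputs and 1 - f for non-negative inputs.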
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 16,334 | 51.864078 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr2fma-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr2fma_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
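    // Work on z = |x|: compute f ~= sigmoid(-z), then fix up the sign at the end.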
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
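// Usage sketch (illustrative, not part of the generated kernel): `batch` is a
// byte count, so callers pass the element count scaled by sizeof(float), and
// `params` must be pre-initialized for the neonfma_rr1_lut64_p2 variant.
// The init step is only hinted at below; the exact helper name is an
// assumption, not taken from this file.
//
//   float in[7], out[7];
//   union xnn_f32_sigmoid_params p;
//   // fill p via the matching xnn_init_f32_sigmoid_*_params helper (assumed)
//   xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr2fma_x4(
//       7 * sizeof(float), in, out, &p);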
| 4,948 | 39.235772 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr2fma-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr2fma_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(-k/64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
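    // The integer add below fuses each table value with the exponent bits derived from n, yielding the scale s.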
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 8,871 | 43.139303 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr2recps-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr2recps_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(-k/64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
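    // nr2recps variant: refine the reciprocal estimate of d with two VRECPS Newton-Raphson steps.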
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 10,637 | 45.657895 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr2recps-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr2recps_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(-k/64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
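    // Reconstruct s := 2**n ~= exp(-z) by integer-adding the shifted bits of n to the bit pattern of the table entry.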
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
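    // Subtract the magic bias to recover n as an ordinary float.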
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
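    // t := z + n * ln2 is the remainder of the range reduction (small by construction).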
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
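    // p := t - c2 * t**2, a degree-2 approximation of 1 - exp(-t) on the reduced range.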
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtCDEF, vpCDEF, vtCDEF);
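    // y := s * (1 - p) ~= s * exp(-t) = exp(-z).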
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmsq_f32(vsCDEF, vsCDEF, vpCDEF);
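    // d := 1 + exp(-z), the sigmoid denominator.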
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
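    // r := 1/d, starting from the hardware reciprocal estimate and refined with two Newton-Raphson steps (hence 'nr2recps' in the kernel name).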
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
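    // f := y * r ~= exp(-z) / (1 + exp(-z)) = sigmoid(-z).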
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
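    // For |x| above the cutoff, exp(-z) is not representable as a normal float; force f to 0 so the blend below yields exactly 0 (large negative x) or 1 (large positive x).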
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
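    // sigmoid(x) = f for x < 0 and 1 - f for x >= 0; blend per lane on the sign of x.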
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
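  // Process any remaining full 4-element vectors.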
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
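    // Remainder of 1-3 elements: a full vector is loaded (the kernel is annotated XNN_OOB_READS, so reading past the batch is permitted) and only the valid lanes are stored.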
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 12,478 | 47.937255 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr2recps-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr2recps_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
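    // Main loop: 20 elements (five 4-lane vectors) per iteration.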
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 17);
    // Use the low 6 bits of n, as an integer, as an index into the 64-entry exp2 lookup table.
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vfmsq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmsq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmsq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 14,319 | 49.780142 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr2recps-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr2recps_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
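    // Main loop: 24 elements (six 4-lane vectors) per iteration.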
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 17);
const int32x4_t veKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 17);
    // Use the low 6 bits of n, as an integer, as an index into the 64-entry exp2 lookup table.
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);
const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
float32x2_t vlKL = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxKL]);
float32x2_t vlMN = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxMN]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
vlKL = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxKL >> 32)], vlKL, 1);
vlMN = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxMN >> 32)], vlMN, 1);
const float32x4_t vlKLMN = vcombine_f32(vlKL, vlMN);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlKLMN), veKLMN));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
float32x4_t vpKLMN = vmulq_f32(vtKLMN, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vfmsq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
vpKLMN = vfmsq_f32(vtKLMN, vpKLMN, vtKLMN);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmsq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmsq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vyKLMN = vfmsq_f32(vsKLMN, vsKLMN, vpKLMN);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(vyKLMN, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
float32x4_t vrKLMN = vrecpeq_f32(vdKLMN);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
float32x4_t vfKLMN = vmulq_f32(vyKLMN, vrKLMN);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 16,160 | 51.300971 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr2recps-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr2recps_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
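    // This variant processes a single 4-lane vector per iteration; the final 1-3 elements are handled below.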
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 4,918 | 38.99187 | 101 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut64-p2-nr2recps-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr2recps_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
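    // Main loop: 8 elements (two 4-lane vectors) per iteration.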
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
    // Use the low 6 bits of n, as an integer, as an index into the 64-entry exp2 lookup table.
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 8,793 | 42.751244 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr1recps1fma-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr1recps1fma_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
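    // Main loop: 12 elements (three 4-lane vectors) per iteration.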
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
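    // s := 2**n, formed by shifting the bits of n straight into the float exponent field; the p5 variant uses a degree-5 polynomial instead of a lookup table.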
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
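    // Degree-5 polynomial in Horner form: p := c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).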
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
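    // e := s + (s*t)*p = s * (1 + t*p) ~= exp(-z).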
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
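    // r := 1/d, refined with one vrecps Newton-Raphson step followed by one FMA-based step (hence 'nr1recps1fma' in the kernel name).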
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 7,705 | 38.927461 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr1recps1fma-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr1recps1fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
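  // Evaluation outline (as implemented below): with z = |x|, the kernel forms
  // e ~= exp(-z) via extended-range reduction: n = round(z * -log2(e)) using the
  // magic-bias trick, s = 2**n rebuilt from the exponent bits, t = z + n * ln(2),
  // and a degree-5 polynomial in t (coefficients c1..c5 from params), so that
  // e = s + (t * s) * p(t). The result f = e / (e + 1) = sigmoid(-|x|) uses a
  // reciprocal of d = e + 1 seeded by VRECPE and refined by one VRECPS step plus
  // one Newton-Raphson FMA step (hence "nr1recps1fma"). Lanes with |x| beyond
  // denorm_cutoff are flushed to zero, and outputs for x >= 0 are mirrored as
  // 1 - f, since sigmoid(x) = 1 - sigmoid(-x).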
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(veCDEF, vrCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 8,966 | 40.901869 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr1recps1fma-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr1recps1fma_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
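  // Same per-element evaluation as the narrower variants; the main loop below
  // is unrolled to 20 elements (five 128-bit vectors) per iteration.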
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc4, vc5, vtGHIJ);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc1, vpGHIJ, vtGHIJ);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t veGHIJ = vfmaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(veGHIJ, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(veCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(veGHIJ, vrGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 10,227 | 42.523404 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr1recps1fma-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr1recps1fma_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
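  // Main loop unrolled to 24 elements (six vectors) per iteration; the wider
  // unroll presumably helps overlap the latency of the VRECPE/VRECPS/FMA
  // reciprocal-refinement chain across independent vectors.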
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc4, vc5, vtGHIJ);
float32x4_t vpKLMN = vfmaq_f32(vc4, vc5, vtKLMN);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc3, vpKLMN, vtKLMN);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc2, vpKLMN, vtKLMN);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc1, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc1, vpKLMN, vtKLMN);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
vtKLMN = vmulq_f32(vtKLMN, vsKLMN);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t veGHIJ = vfmaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t veKLMN = vfmaq_f32(vsKLMN, vpKLMN, vtKLMN);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(veGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(veKLMN, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
float32x4_t vrKLMN = vrecpeq_f32(vdKLMN);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
vrKLMN = vfmaq_f32(vrKLMN, vrKLMN, vfmsq_f32(vone, vrKLMN, vdKLMN));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(veCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(veGHIJ, vrGHIJ);
float32x4_t vfKLMN = vmulq_f32(veKLMN, vrKLMN);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 11,488 | 43.878906 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr1recps1fma-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr1recps1fma_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
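  // Single-vector variant: the main loop handles exactly 4 elements per
  // iteration, so only the partial-vector tail differs. That tail still issues
  // a full vld1q_f32 load; reading past the final element is tolerated here,
  // which is what the XNN_OOB_READS annotation on the kernel signals.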
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 3,828 | 34.453704 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr1recps1fma-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr1recps1fma_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
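  // Output assembly, common to all widths: f = e / d, lanes with |x| above
  // denorm_cutoff are cleared to zero, and the final select keeps f for
  // negative inputs while emitting 1 - f for non-negative ones.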
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 6,441 | 36.453488 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr2fma-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr2fma_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
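  // "nr2fma" variant: the reciprocal of d = e + 1 is seeded by VRECPE and then
  // refined with two Newton-Raphson steps expressed as FMAs, r += r * (1 - r*d),
  // instead of the VRECPS-based refinement used by the recps variants.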
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 7,751 | 39.165803 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr2fma-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr2fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(veCDEF, vrCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 9,024 | 41.172897 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr2fma-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr2fma_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
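  // The magic-bias addition forces n = round(z * -log2(e)) into the low mantissa
  // bits of vn (the constant is chosen so those bits already carry the float
  // exponent bias); shifting the raw bit pattern left by 23 therefore yields
  // s = 2**n directly, and subtracting the bias afterwards recovers n as a float.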
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc4, vc5, vtGHIJ);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc1, vpGHIJ, vtGHIJ);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t veGHIJ = vfmaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(veGHIJ, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(veCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(veGHIJ, vrGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 10,297 | 42.821277 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr2fma-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr2fma_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
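  // Same rr1_p5 + nr2fma scheme as the narrower variants, unrolled to 24
  // elements (six NEON vectors) per main-loop iteration; smaller leftovers
  // fall through to the single-vector loop and the partial-store tail.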
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc4, vc5, vtGHIJ);
float32x4_t vpKLMN = vfmaq_f32(vc4, vc5, vtKLMN);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc3, vpKLMN, vtKLMN);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc2, vpKLMN, vtKLMN);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc1, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc1, vpKLMN, vtKLMN);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
vtKLMN = vmulq_f32(vtKLMN, vsKLMN);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t veGHIJ = vfmaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t veKLMN = vfmaq_f32(vsKLMN, vpKLMN, vtKLMN);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(veGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(veKLMN, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
float32x4_t vrKLMN = vrecpeq_f32(vdKLMN);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
vrKLMN = vfmaq_f32(vrKLMN, vrKLMN, vfmsq_f32(vone, vrKLMN, vdKLMN));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
vrKLMN = vfmaq_f32(vrKLMN, vrKLMN, vfmsq_f32(vone, vrKLMN, vdKLMN));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(veCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(veGHIJ, vrGHIJ);
float32x4_t vfKLMN = vmulq_f32(veKLMN, vrKLMN);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
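  // 1-3 leftover elements: the kernel is annotated XNN_OOB_READS, so a full
  // 4-element vector is loaded (possibly reading past the end of the batch)
  // and only the valid lanes are stored below.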
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 11,570 | 44.199219 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr2fma-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr2fma_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
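  // Single-vector (x4) variant: there is no unrolled main loop; full groups
  // of 4 elements run through one rr1_p5 evaluation each, and the trailing
  // 1-3 elements reuse the same computation with partial stores.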
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 3,838 | 34.546296 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr2fma-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr2fma_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
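  // Note on the exponent trick below: adding vmagic_bias leaves
  // round(vz * (-log2 e)) in the low bits of vn, and shifting those raw bits
  // left by 23 moves them into the exponent field, so vs reinterprets as
  // 2**n without a separate float-to-int conversion.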
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 6,475 | 36.651163 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr2recps-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr2recps_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
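  // "nr2recps" variant: the reciprocal of vd = 1 + exp(-z) starts from VRECPE
  // and is refined with two VRECPS steps, r <- r * (2 - r*d), instead of the
  // explicit FMA-based Newton-Raphson update used in the nr2fma kernels.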
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 7,649 | 38.637306 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr2recps-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr2recps_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
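  // Special-case handling after the division: vcagtq_f32 is an absolute-value
  // compare, so results are cleared where |x| > vdenorm_cutoff; after the
  // sign blend this maps large negative inputs to 0 and large positive
  // inputs to 1 - 0 = 1.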
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(veCDEF, vrCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 8,898 | 40.584112 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr2recps-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr2recps_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
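  // All coefficients and thresholds are read from params->neonfma_rr1_p5, the
  // same parameter block used by the other rr1_p5 kernels in this directory;
  // the x4..x24 nr2recps variants differ only in unroll width (here 20
  // elements, i.e. five vectors per iteration).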
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc4, vc5, vtGHIJ);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc1, vpGHIJ, vtGHIJ);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t veGHIJ = vfmaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(veGHIJ, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(veCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(veGHIJ, vrGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 10,147 | 42.182979 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr2recps-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr2recps_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
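  // Widest nr2recps variant: 24 elements (six vectors) per main-loop
  // iteration; otherwise identical in structure to the narrower kernels above.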
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc4, vc5, vtGHIJ);
float32x4_t vpKLMN = vfmaq_f32(vc4, vc5, vtKLMN);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc3, vpKLMN, vtKLMN);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc2, vpKLMN, vtKLMN);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc1, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc1, vpKLMN, vtKLMN);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
vtKLMN = vmulq_f32(vtKLMN, vsKLMN);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t veGHIJ = vfmaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t veKLMN = vfmaq_f32(vsKLMN, vpKLMN, vtKLMN);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(veGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(veKLMN, vone);
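    // Invert the denominator d = e + 1: VRECPE yields an ~8-bit reciprocal estimate, and each of
    // the two VRECPS steps below is one Newton-Raphson refinement, roughly doubling the number of
    // correct bits, so f = e * (1/d) closely approximates e / (e + 1).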
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
float32x4_t vrKLMN = vrecpeq_f32(vdKLMN);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(veCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(veGHIJ, vrGHIJ);
float32x4_t vfKLMN = vmulq_f32(veKLMN, vrKLMN);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
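    // 1 to 3 remaining elements: load a full vector (the kernel is declared XNN_OOB_READS, so
    // reading past the end of the input is permitted), compute as usual, and store only the
    // valid lanes, two at a time and then one, based on the remaining byte count.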
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 11,396 | 43.519531 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr2recps-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr2recps_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 3,808 | 34.268519 | 101 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-p5-nr2recps-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr2recps_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 6,397 | 36.197674 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-scalar-rr2-lut2048-p1-div-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/scalar-rr2-lut2048-p1-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Note: the table is redeclared as uint32_t[] here to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__scalar_rr2_lut2048_p1_div_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vmagic_bias = params->scalar_rr2_lut2048_p1.magic_bias;
const float vminus_log2e = params->scalar_rr2_lut2048_p1.minus_log2e;
const uint32_t vindex_mask = UINT32_C(0x7FF);
const float vln2_hi = params->scalar_rr2_lut2048_p1.ln2_hi;
const float vln2_lo = params->scalar_rr2_lut2048_p1.ln2_lo;
const float vc1 = params->scalar_rr2_lut2048_p1.c1;
const float vone = params->scalar_rr2_lut2048_p1.one;
const float vdenorm_cutoff = params->scalar_rr2_lut2048_p1.denorm_cutoff;
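  // Reduction: n := z * (-log2(e)) is rounded to a multiple of 1/2048 with the magic-bias trick.
  // The low 11 bits of n's fixed-point representation index the 2048-entry table of 2^(-k/2048)
  // values, and the bits above them, shifted left by 12 into the exponent field, rescale the
  // table entry so that vs reconstructs 2^n. A single linear term c1 then corrects for the
  // reduced argument t = z + n * ln(2), which is computed with a split hi/lo ln(2) (the "rr2"
  // two-step reduction) for extra accuracy.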
do {
const float vx = *input++;
const float vz = fabsf(vx);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t ve = float_as_uint32(vn) << 12;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
const float vs = uint32_as_float(xnn_table_exp2minus_k_over_2048[vidx] + ve);
vn -= vmagic_bias;
float vt = vn * vln2_hi + vz;
vt = vn * vln2_lo + vt;
const float vp = vt * vc1;
const float vy = vp * vs + vs;
const float vd = vy + vone;
float vf = vy / vd;
if XNN_UNPREDICTABLE(vz > vdenorm_cutoff) {
vf = 0.0f;
}
if XNN_UNPREDICTABLE(vx > 0.0f) {
vf = vone - vf;
}
*output++ = vf;
batch -= sizeof(float);
} while (batch != 0);
}
| 2,103 | 28.222222 | 81 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-scalar-rr2-lut2048-p1-div-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/scalar-rr2-lut2048-p1-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Note: the table is redeclared as uint32_t[] here to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__scalar_rr2_lut2048_p1_div_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vmagic_bias = params->scalar_rr2_lut2048_p1.magic_bias;
const float vminus_log2e = params->scalar_rr2_lut2048_p1.minus_log2e;
const uint32_t vindex_mask = UINT32_C(0x7FF);
const float vln2_hi = params->scalar_rr2_lut2048_p1.ln2_hi;
const float vln2_lo = params->scalar_rr2_lut2048_p1.ln2_lo;
const float vc1 = params->scalar_rr2_lut2048_p1.c1;
const float vone = params->scalar_rr2_lut2048_p1.one;
const float vdenorm_cutoff = params->scalar_rr2_lut2048_p1.denorm_cutoff;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
const float vz0 = fabsf(vx0);
const float vz1 = fabsf(vx1);
float vn0 = vz0 * vminus_log2e + vmagic_bias;
float vn1 = vz1 * vminus_log2e + vmagic_bias;
const uint32_t ve0 = float_as_uint32(vn0) << 12;
const uint32_t ve1 = float_as_uint32(vn1) << 12;
const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
const float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_2048[vidx0] + ve0);
const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
const float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_2048[vidx1] + ve1);
vn0 -= vmagic_bias;
vn1 -= vmagic_bias;
float vt0 = vn0 * vln2_hi + vz0;
float vt1 = vn1 * vln2_hi + vz1;
vt0 = vn0 * vln2_lo + vt0;
vt1 = vn1 * vln2_lo + vt1;
const float vp0 = vt0 * vc1;
const float vp1 = vt1 * vc1;
const float vy0 = vp0 * vs0 + vs0;
const float vy1 = vp1 * vs1 + vs1;
const float vd0 = vy0 + vone;
const float vd1 = vy1 + vone;
float vf0 = vy0 / vd0;
float vf1 = vy1 / vd1;
if XNN_UNPREDICTABLE(vz0 > vdenorm_cutoff) {
vf0 = 0.0f;
}
if XNN_UNPREDICTABLE(vz1 > vdenorm_cutoff) {
vf1 = 0.0f;
}
if XNN_UNPREDICTABLE(vx0 > 0.0f) {
vf0 = vone - vf0;
}
if XNN_UNPREDICTABLE(vx1 > 0.0f) {
vf1 = vone - vf1;
}
output[0] = vf0;
output[1] = vf1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const float vx = *input;
const float vz = fabsf(vx);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t ve = float_as_uint32(vn) << 12;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
const float vs = uint32_as_float(xnn_table_exp2minus_k_over_2048[vidx] + ve);
vn -= vmagic_bias;
float vt = vn * vln2_hi + vz;
vt = vn * vln2_lo + vt;
const float vp = vt * vc1;
const float vy = vp * vs + vs;
const float vd = vy + vone;
float vf = vy / vd;
if XNN_UNPREDICTABLE(vz > vdenorm_cutoff) {
vf = 0.0f;
}
if XNN_UNPREDICTABLE(vx > 0.0f) {
vf = vone - vf;
}
*output = vf;
}
}
| 3,603 | 27.15625 | 84 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-scalar-rr2-lut2048-p1-div-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/scalar-rr2-lut2048-p1-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Note: the table is redeclared as uint32_t[] here to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__scalar_rr2_lut2048_p1_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vmagic_bias = params->scalar_rr2_lut2048_p1.magic_bias;
const float vminus_log2e = params->scalar_rr2_lut2048_p1.minus_log2e;
const uint32_t vindex_mask = UINT32_C(0x7FF);
const float vln2_hi = params->scalar_rr2_lut2048_p1.ln2_hi;
const float vln2_lo = params->scalar_rr2_lut2048_p1.ln2_lo;
const float vc1 = params->scalar_rr2_lut2048_p1.c1;
const float vone = params->scalar_rr2_lut2048_p1.one;
const float vdenorm_cutoff = params->scalar_rr2_lut2048_p1.denorm_cutoff;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
const float vz0 = fabsf(vx0);
const float vz1 = fabsf(vx1);
const float vz2 = fabsf(vx2);
const float vz3 = fabsf(vx3);
float vn0 = vz0 * vminus_log2e + vmagic_bias;
float vn1 = vz1 * vminus_log2e + vmagic_bias;
float vn2 = vz2 * vminus_log2e + vmagic_bias;
float vn3 = vz3 * vminus_log2e + vmagic_bias;
const uint32_t ve0 = float_as_uint32(vn0) << 12;
const uint32_t ve1 = float_as_uint32(vn1) << 12;
const uint32_t ve2 = float_as_uint32(vn2) << 12;
const uint32_t ve3 = float_as_uint32(vn3) << 12;
const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
const float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_2048[vidx0] + ve0);
const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
const float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_2048[vidx1] + ve1);
const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
const float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_2048[vidx2] + ve2);
const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
const float vs3 = uint32_as_float(xnn_table_exp2minus_k_over_2048[vidx3] + ve3);
vn0 -= vmagic_bias;
vn1 -= vmagic_bias;
vn2 -= vmagic_bias;
vn3 -= vmagic_bias;
float vt0 = vn0 * vln2_hi + vz0;
float vt1 = vn1 * vln2_hi + vz1;
float vt2 = vn2 * vln2_hi + vz2;
float vt3 = vn3 * vln2_hi + vz3;
vt0 = vn0 * vln2_lo + vt0;
vt1 = vn1 * vln2_lo + vt1;
vt2 = vn2 * vln2_lo + vt2;
vt3 = vn3 * vln2_lo + vt3;
const float vp0 = vt0 * vc1;
const float vp1 = vt1 * vc1;
const float vp2 = vt2 * vc1;
const float vp3 = vt3 * vc1;
const float vy0 = vp0 * vs0 + vs0;
const float vy1 = vp1 * vs1 + vs1;
const float vy2 = vp2 * vs2 + vs2;
const float vy3 = vp3 * vs3 + vs3;
const float vd0 = vy0 + vone;
const float vd1 = vy1 + vone;
const float vd2 = vy2 + vone;
const float vd3 = vy3 + vone;
float vf0 = vy0 / vd0;
float vf1 = vy1 / vd1;
float vf2 = vy2 / vd2;
float vf3 = vy3 / vd3;
if XNN_UNPREDICTABLE(vz0 > vdenorm_cutoff) {
vf0 = 0.0f;
}
if XNN_UNPREDICTABLE(vz1 > vdenorm_cutoff) {
vf1 = 0.0f;
}
if XNN_UNPREDICTABLE(vz2 > vdenorm_cutoff) {
vf2 = 0.0f;
}
if XNN_UNPREDICTABLE(vz3 > vdenorm_cutoff) {
vf3 = 0.0f;
}
if XNN_UNPREDICTABLE(vx0 > 0.0f) {
vf0 = vone - vf0;
}
if XNN_UNPREDICTABLE(vx1 > 0.0f) {
vf1 = vone - vf1;
}
if XNN_UNPREDICTABLE(vx2 > 0.0f) {
vf2 = vone - vf2;
}
if XNN_UNPREDICTABLE(vx3 > 0.0f) {
vf3 = vone - vf3;
}
output[0] = vf0;
output[1] = vf1;
output[2] = vf2;
output[3] = vf3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
const float vz = fabsf(vx);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t ve = float_as_uint32(vn) << 12;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
const float vs = uint32_as_float(xnn_table_exp2minus_k_over_2048[vidx] + ve);
vn -= vmagic_bias;
float vt = vn * vln2_hi + vz;
vt = vn * vln2_lo + vt;
const float vp = vt * vc1;
const float vy = vp * vs + vs;
const float vd = vy + vone;
float vf = vy / vd;
if XNN_UNPREDICTABLE(vz > vdenorm_cutoff) {
vf = 0.0f;
}
if XNN_UNPREDICTABLE(vx > 0.0f) {
vf = vone - vf;
}
*output++ = vf;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 5,123 | 28.790698 | 84 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-scalar-rr2-lut64-p2-div-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/scalar-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Note: the table is redeclared as uint32_t[] here to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__scalar_rr2_lut64_p2_div_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vmagic_bias = params->scalar_rr2_lut64_p2.magic_bias;
const float vminus_log2e = params->scalar_rr2_lut64_p2.minus_log2e;
const uint32_t vindex_mask = UINT32_C(0x3F);
const float vln2_hi = params->scalar_rr2_lut64_p2.ln2_hi;
const float vln2_lo = params->scalar_rr2_lut64_p2.ln2_lo;
const float vc2 = params->scalar_rr2_lut64_p2.c2;
const float vone = params->scalar_rr2_lut64_p2.one;
const float vdenorm_cutoff = params->scalar_rr2_lut64_p2.denorm_cutoff;
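  // Same structure as the lut2048 kernels, but with a coarser 64-entry table: the low 6 bits of
  // n select an entry of 2^(-k/64), the remaining bits are shifted left by 17 into the exponent
  // field, and a degree-2 polynomial in the reduced argument compensates for the coarser table.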
do {
const float vx = *input++;
const float vz = fabsf(vx);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t ve = float_as_uint32(vn) << 17;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
const float vs = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx] + ve);
vn -= vmagic_bias;
float vt = vn * vln2_hi + vz;
vt = vn * vln2_lo + vt;
float vp = vt * vc2;
vp = vt - vp * vt;
const float vy = vs - vs * vp;
const float vd = vy + vone;
float vf = vy / vd;
if XNN_UNPREDICTABLE(vz > vdenorm_cutoff) {
vf = 0.0f;
}
if XNN_UNPREDICTABLE(vx > 0.0f) {
vf = vone - vf;
}
*output++ = vf;
batch -= sizeof(float);
} while (batch != 0);
}
| 2,096 | 27.337838 | 79 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-scalar-rr2-lut64-p2-div-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/scalar-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Note: the table is redeclared as uint32_t[] here to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__scalar_rr2_lut64_p2_div_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vmagic_bias = params->scalar_rr2_lut64_p2.magic_bias;
const float vminus_log2e = params->scalar_rr2_lut64_p2.minus_log2e;
const uint32_t vindex_mask = UINT32_C(0x3F);
const float vln2_hi = params->scalar_rr2_lut64_p2.ln2_hi;
const float vln2_lo = params->scalar_rr2_lut64_p2.ln2_lo;
const float vc2 = params->scalar_rr2_lut64_p2.c2;
const float vone = params->scalar_rr2_lut64_p2.one;
const float vdenorm_cutoff = params->scalar_rr2_lut64_p2.denorm_cutoff;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
const float vz0 = fabsf(vx0);
const float vz1 = fabsf(vx1);
float vn0 = vz0 * vminus_log2e + vmagic_bias;
float vn1 = vz1 * vminus_log2e + vmagic_bias;
const uint32_t ve0 = float_as_uint32(vn0) << 17;
const uint32_t ve1 = float_as_uint32(vn1) << 17;
const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
const float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx0] + ve0);
const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
const float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx1] + ve1);
vn0 -= vmagic_bias;
vn1 -= vmagic_bias;
float vt0 = vn0 * vln2_hi + vz0;
float vt1 = vn1 * vln2_hi + vz1;
vt0 = vn0 * vln2_lo + vt0;
vt1 = vn1 * vln2_lo + vt1;
float vp0 = vt0 * vc2;
float vp1 = vt1 * vc2;
vp0 = vt0 - vp0 * vt0;
vp1 = vt1 - vp1 * vt1;
const float vy0 = vs0 - vs0 * vp0;
const float vy1 = vs1 - vs1 * vp1;
const float vd0 = vy0 + vone;
const float vd1 = vy1 + vone;
float vf0 = vy0 / vd0;
float vf1 = vy1 / vd1;
if XNN_UNPREDICTABLE(vz0 > vdenorm_cutoff) {
vf0 = 0.0f;
}
if XNN_UNPREDICTABLE(vz1 > vdenorm_cutoff) {
vf1 = 0.0f;
}
if XNN_UNPREDICTABLE(vx0 > 0.0f) {
vf0 = vone - vf0;
}
if XNN_UNPREDICTABLE(vx1 > 0.0f) {
vf1 = vone - vf1;
}
output[0] = vf0;
output[1] = vf1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const float vx = *input;
const float vz = fabsf(vx);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t ve = float_as_uint32(vn) << 17;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
const float vs = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx] + ve);
vn -= vmagic_bias;
float vt = vn * vln2_hi + vz;
vt = vn * vln2_lo + vt;
float vp = vt * vc2;
vp = vt - vp * vt;
const float vy = vs - vs * vp;
const float vd = vy + vone;
float vf = vy / vd;
if XNN_UNPREDICTABLE(vz > vdenorm_cutoff) {
vf = 0.0f;
}
if XNN_UNPREDICTABLE(vx > 0.0f) {
vf = vone - vf;
}
*output = vf;
}
}
| 3,635 | 26.338346 | 82 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-scalar-rr2-lut64-p2-div-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/scalar-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
// Note: the table is redeclared as uint32_t[] here to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__scalar_rr2_lut64_p2_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vmagic_bias = params->scalar_rr2_lut64_p2.magic_bias;
const float vminus_log2e = params->scalar_rr2_lut64_p2.minus_log2e;
const uint32_t vindex_mask = UINT32_C(0x3F);
const float vln2_hi = params->scalar_rr2_lut64_p2.ln2_hi;
const float vln2_lo = params->scalar_rr2_lut64_p2.ln2_lo;
const float vc2 = params->scalar_rr2_lut64_p2.c2;
const float vone = params->scalar_rr2_lut64_p2.one;
const float vdenorm_cutoff = params->scalar_rr2_lut64_p2.denorm_cutoff;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
const float vz0 = fabsf(vx0);
const float vz1 = fabsf(vx1);
const float vz2 = fabsf(vx2);
const float vz3 = fabsf(vx3);
float vn0 = vz0 * vminus_log2e + vmagic_bias;
float vn1 = vz1 * vminus_log2e + vmagic_bias;
float vn2 = vz2 * vminus_log2e + vmagic_bias;
float vn3 = vz3 * vminus_log2e + vmagic_bias;
const uint32_t ve0 = float_as_uint32(vn0) << 17;
const uint32_t ve1 = float_as_uint32(vn1) << 17;
const uint32_t ve2 = float_as_uint32(vn2) << 17;
const uint32_t ve3 = float_as_uint32(vn3) << 17;
const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
const float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx0] + ve0);
const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
const float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx1] + ve1);
const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
const float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx2] + ve2);
const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
const float vs3 = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx3] + ve3);
vn0 -= vmagic_bias;
vn1 -= vmagic_bias;
vn2 -= vmagic_bias;
vn3 -= vmagic_bias;
float vt0 = vn0 * vln2_hi + vz0;
float vt1 = vn1 * vln2_hi + vz1;
float vt2 = vn2 * vln2_hi + vz2;
float vt3 = vn3 * vln2_hi + vz3;
vt0 = vn0 * vln2_lo + vt0;
vt1 = vn1 * vln2_lo + vt1;
vt2 = vn2 * vln2_lo + vt2;
vt3 = vn3 * vln2_lo + vt3;
float vp0 = vt0 * vc2;
float vp1 = vt1 * vc2;
float vp2 = vt2 * vc2;
float vp3 = vt3 * vc2;
vp0 = vt0 - vp0 * vt0;
vp1 = vt1 - vp1 * vt1;
vp2 = vt2 - vp2 * vt2;
vp3 = vt3 - vp3 * vt3;
const float vy0 = vs0 - vs0 * vp0;
const float vy1 = vs1 - vs1 * vp1;
const float vy2 = vs2 - vs2 * vp2;
const float vy3 = vs3 - vs3 * vp3;
const float vd0 = vy0 + vone;
const float vd1 = vy1 + vone;
const float vd2 = vy2 + vone;
const float vd3 = vy3 + vone;
float vf0 = vy0 / vd0;
float vf1 = vy1 / vd1;
float vf2 = vy2 / vd2;
float vf3 = vy3 / vd3;
if XNN_UNPREDICTABLE(vz0 > vdenorm_cutoff) {
vf0 = 0.0f;
}
if XNN_UNPREDICTABLE(vz1 > vdenorm_cutoff) {
vf1 = 0.0f;
}
if XNN_UNPREDICTABLE(vz2 > vdenorm_cutoff) {
vf2 = 0.0f;
}
if XNN_UNPREDICTABLE(vz3 > vdenorm_cutoff) {
vf3 = 0.0f;
}
if XNN_UNPREDICTABLE(vx0 > 0.0f) {
vf0 = vone - vf0;
}
if XNN_UNPREDICTABLE(vx1 > 0.0f) {
vf1 = vone - vf1;
}
if XNN_UNPREDICTABLE(vx2 > 0.0f) {
vf2 = vone - vf2;
}
if XNN_UNPREDICTABLE(vx3 > 0.0f) {
vf3 = vone - vf3;
}
output[0] = vf0;
output[1] = vf1;
output[2] = vf2;
output[3] = vf3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
const float vz = fabsf(vx);
float vn = vz * vminus_log2e + vmagic_bias;
const uint32_t ve = float_as_uint32(vn) << 17;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
const float vs = uint32_as_float(xnn_table_exp2minus_k_over_64[vidx] + ve);
vn -= vmagic_bias;
float vt = vn * vln2_hi + vz;
vt = vn * vln2_lo + vt;
float vp = vt * vc2;
vp = vt - vp * vt;
const float vy = vs - vs * vp;
const float vd = vy + vone;
float vf = vy / vd;
if XNN_UNPREDICTABLE(vz > vdenorm_cutoff) {
vf = 0.0f;
}
if XNN_UNPREDICTABLE(vx > 0.0f) {
vf = vone - vf;
}
*output++ = vf;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 5,195 | 28.027933 | 82 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-scalar-rr2-p5-div-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/scalar-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__scalar_rr2_p5_div_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vmagic_bias = params->scalar_rr2_p5.magic_bias;
const float vminus_log2e = params->scalar_rr2_p5.minus_log2e;
const float vln2_hi = params->scalar_rr2_p5.ln2_hi;
const float vln2_lo = params->scalar_rr2_p5.ln2_lo;
const float vc5 = params->scalar_rr2_p5.c5;
const float vc4 = params->scalar_rr2_p5.c4;
const float vc3 = params->scalar_rr2_p5.c3;
const float vc2 = params->scalar_rr2_p5.c2;
const float vc1 = params->scalar_rr2_p5.c1;
const float vone = params->scalar_rr2_p5.one;
const float vdenorm_cutoff = params->scalar_rr2_p5.denorm_cutoff;
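  // Table-free variant: s := 2^n is formed by shifting the magic-biased n straight into the
  // exponent field, and a degree-5 polynomial in the reduced argument t reconstructs
  // exp(-z) ~= s + (t * s) * p(t) before the final division e / (e + 1).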
do {
const float vx = *input++;
const float vz = fabsf(vx);
float vn = vz * vminus_log2e + vmagic_bias;
const float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
float vt = vn * vln2_hi + vz;
vt = vn * vln2_lo + vt;
float vp = vt * vc5 + vc4;
vp = vt * vp + vc3;
vp = vt * vp + vc2;
vp = vt * vp + vc1;
vt *= vs;
const float ve = vt * vp + vs;
const float vd = ve + vone;
float vf = ve / vd;
if XNN_UNPREDICTABLE(vz > vdenorm_cutoff) {
vf = 0.0f;
}
if XNN_UNPREDICTABLE(vx > 0.0f) {
vf = vone - vf;
}
*output++ = vf;
batch -= sizeof(float);
} while (batch != 0);
}
| 1,992 | 25.573333 | 76 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-scalar-rr2-p5-div-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/scalar-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__scalar_rr2_p5_div_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vmagic_bias = params->scalar_rr2_p5.magic_bias;
const float vminus_log2e = params->scalar_rr2_p5.minus_log2e;
const float vln2_hi = params->scalar_rr2_p5.ln2_hi;
const float vln2_lo = params->scalar_rr2_p5.ln2_lo;
const float vc5 = params->scalar_rr2_p5.c5;
const float vc4 = params->scalar_rr2_p5.c4;
const float vc3 = params->scalar_rr2_p5.c3;
const float vc2 = params->scalar_rr2_p5.c2;
const float vc1 = params->scalar_rr2_p5.c1;
const float vone = params->scalar_rr2_p5.one;
const float vdenorm_cutoff = params->scalar_rr2_p5.denorm_cutoff;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
const float vz0 = fabsf(vx0);
const float vz1 = fabsf(vx1);
float vn0 = vz0 * vminus_log2e + vmagic_bias;
float vn1 = vz1 * vminus_log2e + vmagic_bias;
const float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
const float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
vn0 -= vmagic_bias;
vn1 -= vmagic_bias;
float vt0 = vn0 * vln2_hi + vz0;
float vt1 = vn1 * vln2_hi + vz1;
vt0 = vn0 * vln2_lo + vt0;
vt1 = vn1 * vln2_lo + vt1;
float vp0 = vt0 * vc5 + vc4;
float vp1 = vt1 * vc5 + vc4;
vp0 = vt0 * vp0 + vc3;
vp1 = vt1 * vp1 + vc3;
vp0 = vt0 * vp0 + vc2;
vp1 = vt1 * vp1 + vc2;
vp0 = vt0 * vp0 + vc1;
vp1 = vt1 * vp1 + vc1;
vt0 *= vs0;
vt1 *= vs1;
const float ve0 = vt0 * vp0 + vs0;
const float ve1 = vt1 * vp1 + vs1;
const float vd0 = ve0 + vone;
const float vd1 = ve1 + vone;
float vf0 = ve0 / vd0;
float vf1 = ve1 / vd1;
if XNN_UNPREDICTABLE(vz0 > vdenorm_cutoff) {
vf0 = 0.0f;
}
if XNN_UNPREDICTABLE(vz1 > vdenorm_cutoff) {
vf1 = 0.0f;
}
if XNN_UNPREDICTABLE(vx0 > 0.0f) {
vf0 = vone - vf0;
}
if XNN_UNPREDICTABLE(vx1 > 0.0f) {
vf1 = vone - vf1;
}
output[0] = vf0;
output[1] = vf1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const float vx = *input;
const float vz = fabsf(vx);
float vn = vz * vminus_log2e + vmagic_bias;
const float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
float vt = vn * vln2_hi + vz;
vt = vn * vln2_lo + vt;
float vp = vt * vc5 + vc4;
vp = vt * vp + vc3;
vp = vt * vp + vc2;
vp = vt * vp + vc1;
vt *= vs;
const float ve = vt * vp + vs;
const float vd = ve + vone;
float vf = ve / vd;
if XNN_UNPREDICTABLE(vz > vdenorm_cutoff) {
vf = 0.0f;
}
if XNN_UNPREDICTABLE(vx > 0.0f) {
vf = vone - vf;
}
*output = vf;
}
}
| 3,421 | 23.797101 | 76 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-scalar-rr2-p5-div-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/scalar-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__scalar_rr2_p5_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vmagic_bias = params->scalar_rr2_p5.magic_bias;
const float vminus_log2e = params->scalar_rr2_p5.minus_log2e;
const float vln2_hi = params->scalar_rr2_p5.ln2_hi;
const float vln2_lo = params->scalar_rr2_p5.ln2_lo;
const float vc5 = params->scalar_rr2_p5.c5;
const float vc4 = params->scalar_rr2_p5.c4;
const float vc3 = params->scalar_rr2_p5.c3;
const float vc2 = params->scalar_rr2_p5.c2;
const float vc1 = params->scalar_rr2_p5.c1;
const float vone = params->scalar_rr2_p5.one;
const float vdenorm_cutoff = params->scalar_rr2_p5.denorm_cutoff;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
const float vz0 = fabsf(vx0);
const float vz1 = fabsf(vx1);
const float vz2 = fabsf(vx2);
const float vz3 = fabsf(vx3);
float vn0 = vz0 * vminus_log2e + vmagic_bias;
float vn1 = vz1 * vminus_log2e + vmagic_bias;
float vn2 = vz2 * vminus_log2e + vmagic_bias;
float vn3 = vz3 * vminus_log2e + vmagic_bias;
const float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
const float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
const float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);
const float vs3 = uint32_as_float(float_as_uint32(vn3) << 23);
vn0 -= vmagic_bias;
vn1 -= vmagic_bias;
vn2 -= vmagic_bias;
vn3 -= vmagic_bias;
float vt0 = vn0 * vln2_hi + vz0;
float vt1 = vn1 * vln2_hi + vz1;
float vt2 = vn2 * vln2_hi + vz2;
float vt3 = vn3 * vln2_hi + vz3;
vt0 = vn0 * vln2_lo + vt0;
vt1 = vn1 * vln2_lo + vt1;
vt2 = vn2 * vln2_lo + vt2;
vt3 = vn3 * vln2_lo + vt3;
float vp0 = vt0 * vc5 + vc4;
float vp1 = vt1 * vc5 + vc4;
float vp2 = vt2 * vc5 + vc4;
float vp3 = vt3 * vc5 + vc4;
vp0 = vt0 * vp0 + vc3;
vp1 = vt1 * vp1 + vc3;
vp2 = vt2 * vp2 + vc3;
vp3 = vt3 * vp3 + vc3;
vp0 = vt0 * vp0 + vc2;
vp1 = vt1 * vp1 + vc2;
vp2 = vt2 * vp2 + vc2;
vp3 = vt3 * vp3 + vc2;
vp0 = vt0 * vp0 + vc1;
vp1 = vt1 * vp1 + vc1;
vp2 = vt2 * vp2 + vc1;
vp3 = vt3 * vp3 + vc1;
vt0 *= vs0;
vt1 *= vs1;
vt2 *= vs2;
vt3 *= vs3;
const float ve0 = vt0 * vp0 + vs0;
const float ve1 = vt1 * vp1 + vs1;
const float ve2 = vt2 * vp2 + vs2;
const float ve3 = vt3 * vp3 + vs3;
const float vd0 = ve0 + vone;
const float vd1 = ve1 + vone;
const float vd2 = ve2 + vone;
const float vd3 = ve3 + vone;
float vf0 = ve0 / vd0;
float vf1 = ve1 / vd1;
float vf2 = ve2 / vd2;
float vf3 = ve3 / vd3;
if XNN_UNPREDICTABLE(vz0 > vdenorm_cutoff) {
vf0 = 0.0f;
}
if XNN_UNPREDICTABLE(vz1 > vdenorm_cutoff) {
vf1 = 0.0f;
}
if XNN_UNPREDICTABLE(vz2 > vdenorm_cutoff) {
vf2 = 0.0f;
}
if XNN_UNPREDICTABLE(vz3 > vdenorm_cutoff) {
vf3 = 0.0f;
}
if XNN_UNPREDICTABLE(vx0 > 0.0f) {
vf0 = vone - vf0;
}
if XNN_UNPREDICTABLE(vx1 > 0.0f) {
vf1 = vone - vf1;
}
if XNN_UNPREDICTABLE(vx2 > 0.0f) {
vf2 = vone - vf2;
}
if XNN_UNPREDICTABLE(vx3 > 0.0f) {
vf3 = vone - vf3;
}
output[0] = vf0;
output[1] = vf1;
output[2] = vf2;
output[3] = vf3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
const float vz = fabsf(vx);
float vn = vz * vminus_log2e + vmagic_bias;
const float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
float vt = vn * vln2_hi + vz;
vt = vn * vln2_lo + vt;
float vp = vt * vc5 + vc4;
vp = vt * vp + vc3;
vp = vt * vp + vc2;
vp = vt * vp + vc1;
vt *= vs;
const float ve = vt * vp + vs;
const float vd = ve + vone;
float vf = ve / vd;
if XNN_UNPREDICTABLE(vz > vdenorm_cutoff) {
vf = 0.0f;
}
if XNN_UNPREDICTABLE(vx > 0.0f) {
vf = vone - vf;
}
*output++ = vf;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 4,871 | 25.193548 | 76 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-sse2-rr2-p5-div-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/sse-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__sse2_rr2_p5_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse2_rr2_p5.sign_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p5.one);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
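  // SSE2 flavor of the rr2-p5 kernel: z is forced negative (z := -|x|) by OR-ing the sign mask
  // into x, so n = z * log2(e) is non-positive and the denormal check is a less-than compare on z.
  // Because SSE2 lacks a floating-point blend, the x < 0 mask is built with an integer compare
  // against zero and the final select between f and 1 - f uses AND/ANDNOT/OR.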
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
__m128 ve0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
__m128 ve4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
__m128 ve89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
__m128 vd0123 = _mm_add_ps(ve0123, vone);
__m128 vd4567 = _mm_add_ps(ve4567, vone);
__m128 vd89AB = _mm_add_ps(ve89AB, vone);
__m128 vf0123 = _mm_div_ps(ve0123, vd0123);
__m128 vf4567 = _mm_div_ps(ve4567, vd4567);
__m128 vf89AB = _mm_div_ps(ve89AB, vd89AB);
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vz0123, vdenorm_cutoff), vf0123);
vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vz4567, vdenorm_cutoff), vf4567);
vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vz89AB, vdenorm_cutoff), vf89AB);
const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
vf0123 = _mm_or_ps(_mm_and_ps(vf0123, vm0123), _mm_andnot_ps(vm0123, _mm_sub_ps(vone, vf0123)));
vf4567 = _mm_or_ps(_mm_and_ps(vf4567, vm4567), _mm_andnot_ps(vm4567, _mm_sub_ps(vone, vf4567)));
vf89AB = _mm_or_ps(_mm_and_ps(vf89AB, vm89AB), _mm_andnot_ps(vm89AB, _mm_sub_ps(vone, vf89AB)));
_mm_storeu_ps(output, vf0123);
_mm_storeu_ps(output + 4, vf4567);
_mm_storeu_ps(output + 8, vf89AB);
input += 12;
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vf = _mm_or_ps(_mm_and_ps(vf, vm), _mm_andnot_ps(vm, _mm_sub_ps(vone, vf)));
_mm_storeu_ps(output, vf);
input += 4;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vf = _mm_or_ps(_mm_and_ps(vf, vm), _mm_andnot_ps(vm, _mm_sub_ps(vone, vf)));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf);
vf = _mm_movehl_ps(vf, vf);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf);
}
}
}
| 7,664 | 39.13089 | 107 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-sse2-rr2-p5-div-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/sse-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__sse2_rr2_p5_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse2_rr2_p5.sign_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p5.one);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
__m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc5, vtCDEF), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc1);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
__m128 ve0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
__m128 ve4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
__m128 ve89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
__m128 veCDEF = _mm_add_ps(_mm_mul_ps(vtCDEF, vpCDEF), vsCDEF);
__m128 vd0123 = _mm_add_ps(ve0123, vone);
__m128 vd4567 = _mm_add_ps(ve4567, vone);
__m128 vd89AB = _mm_add_ps(ve89AB, vone);
__m128 vdCDEF = _mm_add_ps(veCDEF, vone);
__m128 vf0123 = _mm_div_ps(ve0123, vd0123);
__m128 vf4567 = _mm_div_ps(ve4567, vd4567);
__m128 vf89AB = _mm_div_ps(ve89AB, vd89AB);
__m128 vfCDEF = _mm_div_ps(veCDEF, vdCDEF);
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vz0123, vdenorm_cutoff), vf0123);
vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vz4567, vdenorm_cutoff), vf4567);
vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vz89AB, vdenorm_cutoff), vf89AB);
vfCDEF = _mm_andnot_ps(_mm_cmplt_ps(vzCDEF, vdenorm_cutoff), vfCDEF);
const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
const __m128 vmCDEF = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxCDEF)));
vf0123 = _mm_or_ps(_mm_and_ps(vf0123, vm0123), _mm_andnot_ps(vm0123, _mm_sub_ps(vone, vf0123)));
vf4567 = _mm_or_ps(_mm_and_ps(vf4567, vm4567), _mm_andnot_ps(vm4567, _mm_sub_ps(vone, vf4567)));
vf89AB = _mm_or_ps(_mm_and_ps(vf89AB, vm89AB), _mm_andnot_ps(vm89AB, _mm_sub_ps(vone, vf89AB)));
vfCDEF = _mm_or_ps(_mm_and_ps(vfCDEF, vmCDEF), _mm_andnot_ps(vmCDEF, _mm_sub_ps(vone, vfCDEF)));
_mm_storeu_ps(output, vf0123);
_mm_storeu_ps(output + 4, vf4567);
_mm_storeu_ps(output + 8, vf89AB);
_mm_storeu_ps(output + 12, vfCDEF);
input += 16;
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
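    // n = round(z * log2(e)) is obtained by adding a large "magic bias" so the
    // rounded integer lands in the low mantissa bits; shifting those bits left
    // by 23 places them in the exponent field, reconstructing s = 2**n directly.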
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vf = _mm_or_ps(_mm_and_ps(vf, vm), _mm_andnot_ps(vm, _mm_sub_ps(vone, vf)));
_mm_storeu_ps(output, vf);
input += 4;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vf = _mm_or_ps(_mm_and_ps(vf, vm), _mm_andnot_ps(vm, _mm_sub_ps(vone, vf)));
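    // Store the 1-3 leftover elements: the low two lanes go out via MOVLPS, the
    // vector is shifted down, and a final MOVSS writes a single element if needed.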
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf);
vf = _mm_movehl_ps(vf, vf);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf);
}
}
}
| 8,887 | 41.32381 | 107 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-sse2-rr2-p5-div-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/sse-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__sse2_rr2_p5_div_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse2_rr2_p5.sign_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p5.one);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
const __m128 vxGHIJ = _mm_loadu_ps(input + 16);
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vzGHIJ = _mm_or_ps(vxGHIJ, vsign_mask);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
const __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
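    // t = z - n * ln(2) is computed in two steps with a split ln(2): a high part
    // (apparently chosen so that n * ln2_hi rounds exactly) followed by a small
    // low-order correction, keeping the reduction accurate without FMA.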
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
__m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
__m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc5, vtCDEF), vc4);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc5, vtGHIJ), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc1);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc1);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
__m128 ve0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
__m128 ve4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
__m128 ve89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
__m128 veCDEF = _mm_add_ps(_mm_mul_ps(vtCDEF, vpCDEF), vsCDEF);
__m128 veGHIJ = _mm_add_ps(_mm_mul_ps(vtGHIJ, vpGHIJ), vsGHIJ);
__m128 vd0123 = _mm_add_ps(ve0123, vone);
__m128 vd4567 = _mm_add_ps(ve4567, vone);
__m128 vd89AB = _mm_add_ps(ve89AB, vone);
__m128 vdCDEF = _mm_add_ps(veCDEF, vone);
__m128 vdGHIJ = _mm_add_ps(veGHIJ, vone);
__m128 vf0123 = _mm_div_ps(ve0123, vd0123);
__m128 vf4567 = _mm_div_ps(ve4567, vd4567);
__m128 vf89AB = _mm_div_ps(ve89AB, vd89AB);
__m128 vfCDEF = _mm_div_ps(veCDEF, vdCDEF);
__m128 vfGHIJ = _mm_div_ps(veGHIJ, vdGHIJ);
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vz0123, vdenorm_cutoff), vf0123);
vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vz4567, vdenorm_cutoff), vf4567);
vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vz89AB, vdenorm_cutoff), vf89AB);
vfCDEF = _mm_andnot_ps(_mm_cmplt_ps(vzCDEF, vdenorm_cutoff), vfCDEF);
vfGHIJ = _mm_andnot_ps(_mm_cmplt_ps(vzGHIJ, vdenorm_cutoff), vfGHIJ);
const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
const __m128 vmCDEF = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxCDEF)));
const __m128 vmGHIJ = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxGHIJ)));
vf0123 = _mm_or_ps(_mm_and_ps(vf0123, vm0123), _mm_andnot_ps(vm0123, _mm_sub_ps(vone, vf0123)));
vf4567 = _mm_or_ps(_mm_and_ps(vf4567, vm4567), _mm_andnot_ps(vm4567, _mm_sub_ps(vone, vf4567)));
vf89AB = _mm_or_ps(_mm_and_ps(vf89AB, vm89AB), _mm_andnot_ps(vm89AB, _mm_sub_ps(vone, vf89AB)));
vfCDEF = _mm_or_ps(_mm_and_ps(vfCDEF, vmCDEF), _mm_andnot_ps(vmCDEF, _mm_sub_ps(vone, vfCDEF)));
vfGHIJ = _mm_or_ps(_mm_and_ps(vfGHIJ, vmGHIJ), _mm_andnot_ps(vmGHIJ, _mm_sub_ps(vone, vfGHIJ)));
_mm_storeu_ps(output, vf0123);
_mm_storeu_ps(output + 4, vf4567);
_mm_storeu_ps(output + 8, vf89AB);
_mm_storeu_ps(output + 12, vfCDEF);
_mm_storeu_ps(output + 16, vfGHIJ);
input += 20;
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vf = _mm_or_ps(_mm_and_ps(vf, vm), _mm_andnot_ps(vm, _mm_sub_ps(vone, vf)));
_mm_storeu_ps(output, vf);
input += 4;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vf = _mm_or_ps(_mm_and_ps(vf, vm), _mm_andnot_ps(vm, _mm_sub_ps(vone, vf)));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf);
vf = _mm_movehl_ps(vf, vf);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf);
}
}
}
| 10,110 | 43.152838 | 107 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-sse2-rr2-p5-div-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/sse-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__sse2_rr2_p5_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse2_rr2_p5.sign_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p5.one);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
const __m128 vxGHIJ = _mm_loadu_ps(input + 16);
const __m128 vxKLMN = _mm_loadu_ps(input + 20);
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vzGHIJ = _mm_or_ps(vxGHIJ, vsign_mask);
const __m128 vzKLMN = _mm_or_ps(vxKLMN, vsign_mask);
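    // OR-ing the sign bit in gives z = -|x|, so the exponent computed below is
    // always <= 0 and exp(z) can only underflow (handled later by the denormal
    // cutoff), never overflow.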
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
__m128 vnKLMN = _mm_add_ps(_mm_mul_ps(vzKLMN, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
const __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
const __m128 vsKLMN = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnKLMN), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
vnKLMN = _mm_sub_ps(vnKLMN, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
__m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
__m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
__m128 vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_hi), vzKLMN);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_lo), vtKLMN);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc5, vtCDEF), vc4);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc5, vtGHIJ), vc4);
__m128 vpKLMN = _mm_add_ps(_mm_mul_ps(vc5, vtKLMN), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc1);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc1);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc1);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
vtKLMN = _mm_mul_ps(vtKLMN, vsKLMN);
__m128 ve0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
__m128 ve4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
__m128 ve89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
__m128 veCDEF = _mm_add_ps(_mm_mul_ps(vtCDEF, vpCDEF), vsCDEF);
__m128 veGHIJ = _mm_add_ps(_mm_mul_ps(vtGHIJ, vpGHIJ), vsGHIJ);
__m128 veKLMN = _mm_add_ps(_mm_mul_ps(vtKLMN, vpKLMN), vsKLMN);
__m128 vd0123 = _mm_add_ps(ve0123, vone);
__m128 vd4567 = _mm_add_ps(ve4567, vone);
__m128 vd89AB = _mm_add_ps(ve89AB, vone);
__m128 vdCDEF = _mm_add_ps(veCDEF, vone);
__m128 vdGHIJ = _mm_add_ps(veGHIJ, vone);
__m128 vdKLMN = _mm_add_ps(veKLMN, vone);
__m128 vf0123 = _mm_div_ps(ve0123, vd0123);
__m128 vf4567 = _mm_div_ps(ve4567, vd4567);
__m128 vf89AB = _mm_div_ps(ve89AB, vd89AB);
__m128 vfCDEF = _mm_div_ps(veCDEF, vdCDEF);
__m128 vfGHIJ = _mm_div_ps(veGHIJ, vdGHIJ);
__m128 vfKLMN = _mm_div_ps(veKLMN, vdKLMN);
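    // Below: results are flushed to zero once z drops under the denormal cutoff,
    // then a mask built from the sign of x selects f for negative inputs and
    // 1 - f for non-negative ones, mapping sigmoid(-|x|) back to sigmoid(x).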
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vz0123, vdenorm_cutoff), vf0123);
vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vz4567, vdenorm_cutoff), vf4567);
vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vz89AB, vdenorm_cutoff), vf89AB);
vfCDEF = _mm_andnot_ps(_mm_cmplt_ps(vzCDEF, vdenorm_cutoff), vfCDEF);
vfGHIJ = _mm_andnot_ps(_mm_cmplt_ps(vzGHIJ, vdenorm_cutoff), vfGHIJ);
vfKLMN = _mm_andnot_ps(_mm_cmplt_ps(vzKLMN, vdenorm_cutoff), vfKLMN);
const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
const __m128 vmCDEF = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxCDEF)));
const __m128 vmGHIJ = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxGHIJ)));
const __m128 vmKLMN = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxKLMN)));
vf0123 = _mm_or_ps(_mm_and_ps(vf0123, vm0123), _mm_andnot_ps(vm0123, _mm_sub_ps(vone, vf0123)));
vf4567 = _mm_or_ps(_mm_and_ps(vf4567, vm4567), _mm_andnot_ps(vm4567, _mm_sub_ps(vone, vf4567)));
vf89AB = _mm_or_ps(_mm_and_ps(vf89AB, vm89AB), _mm_andnot_ps(vm89AB, _mm_sub_ps(vone, vf89AB)));
vfCDEF = _mm_or_ps(_mm_and_ps(vfCDEF, vmCDEF), _mm_andnot_ps(vmCDEF, _mm_sub_ps(vone, vfCDEF)));
vfGHIJ = _mm_or_ps(_mm_and_ps(vfGHIJ, vmGHIJ), _mm_andnot_ps(vmGHIJ, _mm_sub_ps(vone, vfGHIJ)));
vfKLMN = _mm_or_ps(_mm_and_ps(vfKLMN, vmKLMN), _mm_andnot_ps(vmKLMN, _mm_sub_ps(vone, vfKLMN)));
_mm_storeu_ps(output, vf0123);
_mm_storeu_ps(output + 4, vf4567);
_mm_storeu_ps(output + 8, vf89AB);
_mm_storeu_ps(output + 12, vfCDEF);
_mm_storeu_ps(output + 16, vfGHIJ);
_mm_storeu_ps(output + 20, vfKLMN);
input += 24;
output += 24;
}
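  // Remaining elements are handled 4 at a time, and a final sub-vector tail
  // (1-3 floats) is stored with partial-register moves in the last block below.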
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vf = _mm_or_ps(_mm_and_ps(vf, vm), _mm_andnot_ps(vm, _mm_sub_ps(vone, vf)));
_mm_storeu_ps(output, vf);
input += 4;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vf = _mm_or_ps(_mm_and_ps(vf, vm), _mm_andnot_ps(vm, _mm_sub_ps(vone, vf)));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf);
vf = _mm_movehl_ps(vf, vf);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf);
}
}
}
| 11,333 | 44.701613 | 107 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-sse2-rr2-p5-div-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/sse-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__sse2_rr2_p5_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse2_rr2_p5.sign_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p5.one);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vf = _mm_or_ps(_mm_and_ps(vf, vm), _mm_andnot_ps(vm, _mm_sub_ps(vone, vf)));
_mm_storeu_ps(output, vf);
input += 4;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vf = _mm_or_ps(_mm_and_ps(vf, vm), _mm_andnot_ps(vm, _mm_sub_ps(vone, vf)));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf);
vf = _mm_movehl_ps(vf, vf);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf);
}
}
}
| 3,881 | 33.972973 | 99 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-sse2-rr2-p5-div-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/sse-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__sse2_rr2_p5_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse2_rr2_p5.sign_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p5.one);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vt4567 = _mm_mul_ps(vt4567, vs4567);
__m128 ve0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
__m128 ve4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
__m128 vd0123 = _mm_add_ps(ve0123, vone);
__m128 vd4567 = _mm_add_ps(ve4567, vone);
__m128 vf0123 = _mm_div_ps(ve0123, vd0123);
__m128 vf4567 = _mm_div_ps(ve4567, vd4567);
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vz0123, vdenorm_cutoff), vf0123);
vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vz4567, vdenorm_cutoff), vf4567);
const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
vf0123 = _mm_or_ps(_mm_and_ps(vf0123, vm0123), _mm_andnot_ps(vm0123, _mm_sub_ps(vone, vf0123)));
vf4567 = _mm_or_ps(_mm_and_ps(vf4567, vm4567), _mm_andnot_ps(vm4567, _mm_sub_ps(vone, vf4567)));
_mm_storeu_ps(output, vf0123);
_mm_storeu_ps(output + 4, vf4567);
input += 8;
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vf = _mm_or_ps(_mm_and_ps(vf, vm), _mm_andnot_ps(vm, _mm_sub_ps(vone, vf)));
_mm_storeu_ps(output, vf);
input += 4;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vf = _mm_or_ps(_mm_and_ps(vf, vm), _mm_andnot_ps(vm, _mm_sub_ps(vone, vf)));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf);
vf = _mm_movehl_ps(vf, vf);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf);
}
}
}
| 6,438 | 36.436047 | 107 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-sse41-rr2-p5-div-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/sse-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__sse41_rr2_p5_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse2_rr2_p5.sign_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p5.one);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
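  // This SSE4.1 variant follows the same rr2-p5 scheme as the SSE2 kernels above;
  // the only apparent difference is the final sign selection, which uses a single
  // _mm_blendv_ps on the sign bit of x instead of the compare/and/andnot/or sequence.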
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
__m128 ve0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
__m128 ve4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
__m128 ve89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
__m128 vd0123 = _mm_add_ps(ve0123, vone);
__m128 vd4567 = _mm_add_ps(ve4567, vone);
__m128 vd89AB = _mm_add_ps(ve89AB, vone);
__m128 vf0123 = _mm_div_ps(ve0123, vd0123);
__m128 vf4567 = _mm_div_ps(ve4567, vd4567);
__m128 vf89AB = _mm_div_ps(ve89AB, vd89AB);
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vz0123, vdenorm_cutoff), vf0123);
vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vz4567, vdenorm_cutoff), vf4567);
vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vz89AB, vdenorm_cutoff), vf89AB);
vf0123 = _mm_blendv_ps(_mm_sub_ps(vone, vf0123), vf0123, vx0123);
vf4567 = _mm_blendv_ps(_mm_sub_ps(vone, vf4567), vf4567, vx4567);
vf89AB = _mm_blendv_ps(_mm_sub_ps(vone, vf89AB), vf89AB, vx89AB);
_mm_storeu_ps(output, vf0123);
_mm_storeu_ps(output + 4, vf4567);
_mm_storeu_ps(output + 8, vf89AB);
input += 12;
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
vf = _mm_blendv_ps(_mm_sub_ps(vone, vf), vf, vx);
_mm_storeu_ps(output, vf);
input += 4;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
vf = _mm_blendv_ps(_mm_sub_ps(vone, vf), vf, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf);
vf = _mm_movehl_ps(vf, vf);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf);
}
}
}
| 6,993 | 36.805405 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-sse41-rr2-p5-div-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/sse-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__sse41_rr2_p5_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse2_rr2_p5.sign_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p5.one);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
__m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc5, vtCDEF), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc1);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
__m128 ve0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
__m128 ve4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
__m128 ve89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
__m128 veCDEF = _mm_add_ps(_mm_mul_ps(vtCDEF, vpCDEF), vsCDEF);
__m128 vd0123 = _mm_add_ps(ve0123, vone);
__m128 vd4567 = _mm_add_ps(ve4567, vone);
__m128 vd89AB = _mm_add_ps(ve89AB, vone);
__m128 vdCDEF = _mm_add_ps(veCDEF, vone);
__m128 vf0123 = _mm_div_ps(ve0123, vd0123);
__m128 vf4567 = _mm_div_ps(ve4567, vd4567);
__m128 vf89AB = _mm_div_ps(ve89AB, vd89AB);
__m128 vfCDEF = _mm_div_ps(veCDEF, vdCDEF);
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vz0123, vdenorm_cutoff), vf0123);
vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vz4567, vdenorm_cutoff), vf4567);
vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vz89AB, vdenorm_cutoff), vf89AB);
vfCDEF = _mm_andnot_ps(_mm_cmplt_ps(vzCDEF, vdenorm_cutoff), vfCDEF);
vf0123 = _mm_blendv_ps(_mm_sub_ps(vone, vf0123), vf0123, vx0123);
vf4567 = _mm_blendv_ps(_mm_sub_ps(vone, vf4567), vf4567, vx4567);
vf89AB = _mm_blendv_ps(_mm_sub_ps(vone, vf89AB), vf89AB, vx89AB);
vfCDEF = _mm_blendv_ps(_mm_sub_ps(vone, vfCDEF), vfCDEF, vxCDEF);
_mm_storeu_ps(output, vf0123);
_mm_storeu_ps(output + 4, vf4567);
_mm_storeu_ps(output + 8, vf89AB);
_mm_storeu_ps(output + 12, vfCDEF);
input += 16;
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
vf = _mm_blendv_ps(_mm_sub_ps(vone, vf), vf, vx);
_mm_storeu_ps(output, vf);
input += 4;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
vf = _mm_blendv_ps(_mm_sub_ps(vone, vf), vf, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf);
vf = _mm_movehl_ps(vf, vf);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf);
}
}
}
| 8,077 | 38.793103 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-sse41-rr2-p5-div-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/sse-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__sse41_rr2_p5_div_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse2_rr2_p5.sign_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p5.one);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
const __m128 vxGHIJ = _mm_loadu_ps(input + 16);
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vzGHIJ = _mm_or_ps(vxGHIJ, vsign_mask);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
const __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
__m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
__m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc5, vtCDEF), vc4);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc5, vtGHIJ), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc1);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc1);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
__m128 ve0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
__m128 ve4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
__m128 ve89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
__m128 veCDEF = _mm_add_ps(_mm_mul_ps(vtCDEF, vpCDEF), vsCDEF);
__m128 veGHIJ = _mm_add_ps(_mm_mul_ps(vtGHIJ, vpGHIJ), vsGHIJ);
__m128 vd0123 = _mm_add_ps(ve0123, vone);
__m128 vd4567 = _mm_add_ps(ve4567, vone);
__m128 vd89AB = _mm_add_ps(ve89AB, vone);
__m128 vdCDEF = _mm_add_ps(veCDEF, vone);
__m128 vdGHIJ = _mm_add_ps(veGHIJ, vone);
__m128 vf0123 = _mm_div_ps(ve0123, vd0123);
__m128 vf4567 = _mm_div_ps(ve4567, vd4567);
__m128 vf89AB = _mm_div_ps(ve89AB, vd89AB);
__m128 vfCDEF = _mm_div_ps(veCDEF, vdCDEF);
__m128 vfGHIJ = _mm_div_ps(veGHIJ, vdGHIJ);
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vz0123, vdenorm_cutoff), vf0123);
vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vz4567, vdenorm_cutoff), vf4567);
vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vz89AB, vdenorm_cutoff), vf89AB);
vfCDEF = _mm_andnot_ps(_mm_cmplt_ps(vzCDEF, vdenorm_cutoff), vfCDEF);
vfGHIJ = _mm_andnot_ps(_mm_cmplt_ps(vzGHIJ, vdenorm_cutoff), vfGHIJ);
vf0123 = _mm_blendv_ps(_mm_sub_ps(vone, vf0123), vf0123, vx0123);
vf4567 = _mm_blendv_ps(_mm_sub_ps(vone, vf4567), vf4567, vx4567);
vf89AB = _mm_blendv_ps(_mm_sub_ps(vone, vf89AB), vf89AB, vx89AB);
vfCDEF = _mm_blendv_ps(_mm_sub_ps(vone, vfCDEF), vfCDEF, vxCDEF);
vfGHIJ = _mm_blendv_ps(_mm_sub_ps(vone, vfGHIJ), vfGHIJ, vxGHIJ);
_mm_storeu_ps(output, vf0123);
_mm_storeu_ps(output + 4, vf4567);
_mm_storeu_ps(output + 8, vf89AB);
_mm_storeu_ps(output + 12, vfCDEF);
_mm_storeu_ps(output + 16, vfGHIJ);
input += 20;
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
vf = _mm_blendv_ps(_mm_sub_ps(vone, vf), vf, vx);
_mm_storeu_ps(output, vf);
input += 4;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
vf = _mm_blendv_ps(_mm_sub_ps(vone, vf), vf, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf);
vf = _mm_movehl_ps(vf, vf);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf);
}
}
}
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-sse41-rr2-p5-div-x24.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/sse-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__sse41_rr2_p5_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse2_rr2_p5.sign_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p5.one);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
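  // Algorithm (rr2_p5_div): z = -|x| is formed by setting the sign bit, so exp(z) <= 1 and never
  // overflows. exp(z) is evaluated as s * (1 + t * p(t)), where z = n * ln(2) + t, s = 2**n is
  // reconstructed from the low bits of (n + magic_bias) shifted into the exponent field, ln(2) is
  // split into hi/lo parts for extra accuracy, and p(t) is a degree-5 polynomial. The sigmoid is
  // then computed as e / (e + 1) with a full division, flushed to zero below the denormal cutoff,
  // and mirrored to 1 - sigmoid(-x) for non-negative x via a blend on the sign of x.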
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vx89AB = _mm_loadu_ps(input + 8);
const __m128 vxCDEF = _mm_loadu_ps(input + 12);
const __m128 vxGHIJ = _mm_loadu_ps(input + 16);
const __m128 vxKLMN = _mm_loadu_ps(input + 20);
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
const __m128 vz89AB = _mm_or_ps(vx89AB, vsign_mask);
const __m128 vzCDEF = _mm_or_ps(vxCDEF, vsign_mask);
const __m128 vzGHIJ = _mm_or_ps(vxGHIJ, vsign_mask);
const __m128 vzKLMN = _mm_or_ps(vxKLMN, vsign_mask);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
__m128 vnKLMN = _mm_add_ps(_mm_mul_ps(vzKLMN, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
const __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
const __m128 vsKLMN = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnKLMN), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
vnKLMN = _mm_sub_ps(vnKLMN, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
__m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
__m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
__m128 vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_hi), vzKLMN);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_lo), vtKLMN);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc5, vtCDEF), vc4);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc5, vtGHIJ), vc4);
__m128 vpKLMN = _mm_add_ps(_mm_mul_ps(vc5, vtKLMN), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc1);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc1);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc1);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
vtKLMN = _mm_mul_ps(vtKLMN, vsKLMN);
__m128 ve0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
__m128 ve4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
__m128 ve89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
__m128 veCDEF = _mm_add_ps(_mm_mul_ps(vtCDEF, vpCDEF), vsCDEF);
__m128 veGHIJ = _mm_add_ps(_mm_mul_ps(vtGHIJ, vpGHIJ), vsGHIJ);
__m128 veKLMN = _mm_add_ps(_mm_mul_ps(vtKLMN, vpKLMN), vsKLMN);
__m128 vd0123 = _mm_add_ps(ve0123, vone);
__m128 vd4567 = _mm_add_ps(ve4567, vone);
__m128 vd89AB = _mm_add_ps(ve89AB, vone);
__m128 vdCDEF = _mm_add_ps(veCDEF, vone);
__m128 vdGHIJ = _mm_add_ps(veGHIJ, vone);
__m128 vdKLMN = _mm_add_ps(veKLMN, vone);
__m128 vf0123 = _mm_div_ps(ve0123, vd0123);
__m128 vf4567 = _mm_div_ps(ve4567, vd4567);
__m128 vf89AB = _mm_div_ps(ve89AB, vd89AB);
__m128 vfCDEF = _mm_div_ps(veCDEF, vdCDEF);
__m128 vfGHIJ = _mm_div_ps(veGHIJ, vdGHIJ);
__m128 vfKLMN = _mm_div_ps(veKLMN, vdKLMN);
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vz0123, vdenorm_cutoff), vf0123);
vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vz4567, vdenorm_cutoff), vf4567);
vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vz89AB, vdenorm_cutoff), vf89AB);
vfCDEF = _mm_andnot_ps(_mm_cmplt_ps(vzCDEF, vdenorm_cutoff), vfCDEF);
vfGHIJ = _mm_andnot_ps(_mm_cmplt_ps(vzGHIJ, vdenorm_cutoff), vfGHIJ);
vfKLMN = _mm_andnot_ps(_mm_cmplt_ps(vzKLMN, vdenorm_cutoff), vfKLMN);
vf0123 = _mm_blendv_ps(_mm_sub_ps(vone, vf0123), vf0123, vx0123);
vf4567 = _mm_blendv_ps(_mm_sub_ps(vone, vf4567), vf4567, vx4567);
vf89AB = _mm_blendv_ps(_mm_sub_ps(vone, vf89AB), vf89AB, vx89AB);
vfCDEF = _mm_blendv_ps(_mm_sub_ps(vone, vfCDEF), vfCDEF, vxCDEF);
vfGHIJ = _mm_blendv_ps(_mm_sub_ps(vone, vfGHIJ), vfGHIJ, vxGHIJ);
vfKLMN = _mm_blendv_ps(_mm_sub_ps(vone, vfKLMN), vfKLMN, vxKLMN);
_mm_storeu_ps(output, vf0123);
_mm_storeu_ps(output + 4, vf4567);
_mm_storeu_ps(output + 8, vf89AB);
_mm_storeu_ps(output + 12, vfCDEF);
_mm_storeu_ps(output + 16, vfGHIJ);
_mm_storeu_ps(output + 20, vfKLMN);
input += 24;
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
vf = _mm_blendv_ps(_mm_sub_ps(vone, vf), vf, vx);
_mm_storeu_ps(output, vf);
input += 4;
output += 4;
}
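  // Tail of 1-3 elements: a full 4-lane vector is loaded (permitted by XNN_OOB_READS) and only
  // the remaining lanes are stored.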
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
vf = _mm_blendv_ps(_mm_sub_ps(vone, vf), vf, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf);
vf = _mm_movehl_ps(vf, vf);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf);
}
}
}
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-sse41-rr2-p5-div-x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/sse-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__sse41_rr2_p5_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse2_rr2_p5.sign_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p5.one);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
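  // Same rr2_p5_div scheme as the x24 variant above: exp(-|x|) via two-step range reduction and a
  // degree-5 polynomial, sigmoid = e / (e + 1), with the result mirrored for non-negative x.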
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
vf = _mm_blendv_ps(_mm_sub_ps(vone, vf), vf, vx);
_mm_storeu_ps(output, vf);
input += 4;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
vf = _mm_blendv_ps(_mm_sub_ps(vone, vf), vf, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf);
vf = _mm_movehl_ps(vf, vf);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf);
}
}
}
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-sse41-rr2-p5-div-x8.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/sse-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__sse41_rr2_p5_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsign_mask = _mm_load_ps(params->sse2_rr2_p5.sign_mask);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p5.one);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
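  // Same rr2_p5_div scheme as the x24 variant above: exp(-|x|) via two-step range reduction and a
  // degree-5 polynomial, sigmoid = e / (e + 1), with the result mirrored for non-negative x.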
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
const __m128 vz0123 = _mm_or_ps(vx0123, vsign_mask);
const __m128 vz4567 = _mm_or_ps(vx4567, vsign_mask);
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vt4567 = _mm_mul_ps(vt4567, vs4567);
__m128 ve0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
__m128 ve4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
__m128 vd0123 = _mm_add_ps(ve0123, vone);
__m128 vd4567 = _mm_add_ps(ve4567, vone);
__m128 vf0123 = _mm_div_ps(ve0123, vd0123);
__m128 vf4567 = _mm_div_ps(ve4567, vd4567);
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vz0123, vdenorm_cutoff), vf0123);
vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vz4567, vdenorm_cutoff), vf4567);
vf0123 = _mm_blendv_ps(_mm_sub_ps(vone, vf0123), vf0123, vx0123);
vf4567 = _mm_blendv_ps(_mm_sub_ps(vone, vf4567), vf4567, vx4567);
_mm_storeu_ps(output, vf0123);
_mm_storeu_ps(output + 4, vf4567);
input += 8;
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
vf = _mm_blendv_ps(_mm_sub_ps(vone, vf), vf, vx);
_mm_storeu_ps(output, vf);
input += 4;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_or_ps(vx, vsign_mask);
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
vt = _mm_mul_ps(vt, vs);
__m128 ve = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
__m128 vd = _mm_add_ps(ve, vone);
__m128 vf = _mm_div_ps(ve, vd);
vf = _mm_andnot_ps(_mm_cmplt_ps(vz, vdenorm_cutoff), vf);
vf = _mm_blendv_ps(_mm_sub_ps(vone, vf), vf, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf);
vf = _mm_movehl_ps(vf, vf);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf);
}
}
}
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmblendvps-fma-rr2-p5-div-x12.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmblendvps_fma_rr2_p5_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
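  // Algorithm (rr2_p5_div, relaxed SIMD): e = exp(-|x|) is evaluated with relaxed multiply-add
  // intrinsics: a two-step range reduction (ln2 split into hi/lo words), a degree-5 polynomial in
  // the reduced argument, and a 2**n scale taken from the low bits of (n + magic_bias) shifted
  // into the exponent field. The sigmoid is reconstructed as e / (e + 1), flushed to zero beyond
  // the denormal cutoff, and mirrored to 1 - f for non-negative inputs with a relaxed lane select
  // keyed on the sign of x.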
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vminus_log2e, vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_hi, vz89AB);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_lo, vt89AB);
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vc5, vc4);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vc5, vc4);
v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vc5, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc3);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc2);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc2);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc1);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc1);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t ve0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
const v128_t ve4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
const v128_t ve89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, vx0123);
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, vx4567);
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, vx89AB);
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
wasm_v128_store(output, vf);
output += 4;
}
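  // Tail of 1-3 elements: a full 4-lane vector is loaded (permitted by XNN_OOB_READS) and only
  // the remaining lanes are stored.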
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmblendvps-fma-rr2-p5-div-x16.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmblendvps_fma_rr2_p5_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
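  // Same relaxed-SIMD rr2_p5_div scheme as the x12 variant above, unrolled to 16 elements per
  // main-loop iteration.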
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vminus_log2e, vmagic_bias);
v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vzCDEF, vminus_log2e, vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_hi, vz89AB);
v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_hi, vzCDEF);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_lo, vt89AB);
vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_lo, vtCDEF);
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vc5, vc4);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vc5, vc4);
v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vc5, vc4);
v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vc5, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc3);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc3);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc2);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc2);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc2);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc1);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc1);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc1);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
const v128_t ve0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
const v128_t ve4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
const v128_t ve89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
const v128_t veCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vsCDEF);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(veCDEF, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(veCDEF, vdCDEF);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, vx0123);
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, vx4567);
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, vx89AB);
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, vxCDEF);
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmblendvps-fma-rr2-p5-div-x20.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmblendvps_fma_rr2_p5_div_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
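  // Same relaxed-SIMD rr2_p5_div scheme as the x12 variant above, unrolled to 20 elements per
  // main-loop iteration.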
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
const v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vminus_log2e, vmagic_bias);
v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vzCDEF, vminus_log2e, vmagic_bias);
v128_t vnGHIJ = __builtin_wasm_relaxed_madd_f32x4(vzGHIJ, vminus_log2e, vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_hi, vz89AB);
v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_hi, vzCDEF);
v128_t vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vln2_hi, vzGHIJ);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_lo, vt89AB);
vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_lo, vtCDEF);
vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vln2_lo, vtGHIJ);
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vc5, vc4);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vc5, vc4);
v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vc5, vc4);
v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vc5, vc4);
v128_t vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vc5, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc3);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc3);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc3);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc2);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc2);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc2);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vc2);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc1);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc1);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc1);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc1);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
const v128_t ve0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
const v128_t ve4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
const v128_t ve89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
const v128_t veCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vsCDEF);
const v128_t veGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vsGHIJ);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(veCDEF, vone);
const v128_t vdGHIJ = wasm_f32x4_add(veGHIJ, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(veCDEF, vdCDEF);
v128_t vfGHIJ = wasm_f32x4_div(veGHIJ, vdGHIJ);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
const v128_t vcfGHIJ = wasm_f32x4_sub(vone, vfGHIJ);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, vx0123);
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, vx4567);
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, vx89AB);
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, vxCDEF);
vfGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(vfGHIJ, vcfGHIJ, vxGHIJ);
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmblendvps-fma-rr2-p5-div-x24.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmblendvps_fma_rr2_p5_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
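  // Same relaxed-SIMD rr2_p5_div scheme as the x12 variant above, unrolled to 24 elements per
  // main-loop iteration.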
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
const v128_t vxGHIJ = wasm_v128_load(input + 16);
const v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
const v128_t vzKLMN = wasm_f32x4_abs(vxKLMN);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vminus_log2e, vmagic_bias);
v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vzCDEF, vminus_log2e, vmagic_bias);
v128_t vnGHIJ = __builtin_wasm_relaxed_madd_f32x4(vzGHIJ, vminus_log2e, vmagic_bias);
v128_t vnKLMN = __builtin_wasm_relaxed_madd_f32x4(vzKLMN, vminus_log2e, vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
const v128_t vsKLMN = wasm_i32x4_shl(vnKLMN, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_hi, vz89AB);
v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_hi, vzCDEF);
v128_t vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vln2_hi, vzGHIJ);
v128_t vtKLMN = __builtin_wasm_relaxed_madd_f32x4(vnKLMN, vln2_hi, vzKLMN);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_lo, vt89AB);
vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_lo, vtCDEF);
vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vln2_lo, vtGHIJ);
vtKLMN = __builtin_wasm_relaxed_madd_f32x4(vnKLMN, vln2_lo, vtKLMN);
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vc5, vc4);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vc5, vc4);
v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vc5, vc4);
v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vc5, vc4);
v128_t vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vc5, vc4);
v128_t vpKLMN = __builtin_wasm_relaxed_madd_f32x4(vtKLMN, vc5, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc3);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc3);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc3);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vc3);
vpKLMN = __builtin_wasm_relaxed_madd_f32x4(vtKLMN, vpKLMN, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc2);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc2);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc2);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vc2);
vpKLMN = __builtin_wasm_relaxed_madd_f32x4(vtKLMN, vpKLMN, vc2);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc1);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc1);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc1);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc1);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vc1);
vpKLMN = __builtin_wasm_relaxed_madd_f32x4(vtKLMN, vpKLMN, vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
const v128_t ve0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
const v128_t ve4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
const v128_t ve89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
const v128_t veCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vsCDEF);
const v128_t veGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vsGHIJ);
const v128_t veKLMN = __builtin_wasm_relaxed_madd_f32x4(vtKLMN, vpKLMN, vsKLMN);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(veCDEF, vone);
const v128_t vdGHIJ = wasm_f32x4_add(veGHIJ, vone);
const v128_t vdKLMN = wasm_f32x4_add(veKLMN, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(veCDEF, vdCDEF);
v128_t vfGHIJ = wasm_f32x4_div(veGHIJ, vdGHIJ);
v128_t vfKLMN = wasm_f32x4_div(veKLMN, vdKLMN);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
vfKLMN = wasm_v128_andnot(vfKLMN, wasm_f32x4_gt(vzKLMN, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
const v128_t vcfGHIJ = wasm_f32x4_sub(vone, vfGHIJ);
const v128_t vcfKLMN = wasm_f32x4_sub(vone, vfKLMN);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, vx0123);
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, vx4567);
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, vx89AB);
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, vxCDEF);
vfGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(vfGHIJ, vcfGHIJ, vxGHIJ);
vfKLMN = __builtin_wasm_relaxed_laneselect_i32x4(vfKLMN, vcfKLMN, vxKLMN);
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
wasm_v128_store(output + 20, vfKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmblendvps-fma-rr2-p5-div-x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmblendvps_fma_rr2_p5_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
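The wasmblendvps_fma files above and the plain wasmblendvps files later in this listing differ only in how a*b + c is issued: as the relaxed __builtin_wasm_relaxed_madd_f32x4 builtin or as an explicit multiply followed by an add. A small helper along the following lines captures both shapes; the __wasm_relaxed_simd__ guard is assumed to be the macro clang predefines when relaxed SIMD is enabled.

#include <wasm_simd128.h>

// Hypothetical wrapper making the contrast explicit: a single relaxed fused
// multiply-add when relaxed SIMD is compiled in, otherwise the separate
// multiply + add used by the non-FMA kernels in this listing.
static inline v128_t f32x4_madd(v128_t a, v128_t b, v128_t c) {
#if defined(__wasm_relaxed_simd__)
  return __builtin_wasm_relaxed_madd_f32x4(a, b, c);
#else
  return wasm_f32x4_add(wasm_f32x4_mul(a, b), c);
#endif
}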
| 4,016 | 36.542056 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmblendvps-fma-rr2-p5-div-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmblendvps_fma_rr2_p5_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vc5, vc4);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vc5, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc2);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc1);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t ve0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
const v128_t ve4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, vx0123);
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, vx4567);
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 6,576 | 38.383234 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmblendvps-rr2-p5-div-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmblendvps_rr2_p5_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vc5), vc4);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vc5), vc4);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vc5), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc1);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc1);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t ve0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
const v128_t ve4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
const v128_t ve89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, vx0123);
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, vx4567);
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, vx89AB);
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
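One property worth noting in the `batch != 0` remainder path: it issues a full 16-byte wasm_v128_load even when only one to three floats remain, which is presumably what the XNN_OOB_READS annotation on these kernels acknowledges. If reading past the end of the input buffer is unacceptable in a given context, the caller can stage the tail through a small padded buffer instead; the sketch below is a hypothetical workaround, not part of XNNPACK.

#include <stddef.h>
#include <string.h>

union xnn_f32_sigmoid_params;  // opaque here; the full definition lives in the XNNPACK headers

typedef void (*vsigmoid_fn)(size_t batch_bytes, const float* input, float* output,
                            const union xnn_f32_sigmoid_params* params);

// Hypothetical tail staging: copy the last 1-3 floats into a zero-padded
// 4-float buffer, run a 4-wide batch on it, and copy the results back, so the
// kernel's full-width load never touches memory the caller does not own.
static void sigmoid_tail_padded(vsigmoid_fn kernel, size_t tail_count,
                                const float* input, float* output,
                                const union xnn_f32_sigmoid_params* params) {
  float padded_in[4] = {0.0f, 0.0f, 0.0f, 0.0f};
  float padded_out[4];
  memcpy(padded_in, input, tail_count * sizeof(float));
  kernel(4 * sizeof(float), padded_in, padded_out, params);
  memcpy(output, padded_out, tail_count * sizeof(float));
}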
| 7,680 | 40.295699 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmblendvps-rr2-p5-div-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmblendvps_rr2_p5_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_hi), vzCDEF);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_lo), vtCDEF);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vc5), vc4);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vc5), vc4);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vc5), vc4);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vc5), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc1);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc1);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc1);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
const v128_t ve0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
const v128_t ve4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
const v128_t ve89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
const v128_t veCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vsCDEF);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(veCDEF, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(veCDEF, vdCDEF);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, vx0123);
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, vx4567);
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, vx89AB);
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, vxCDEF);
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
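The rr2 in these file names refers to the two-constant range reduction visible in the loops above: ln 2 is split into ln2_hi plus a small ln2_lo correction so that the product with the rounded quotient stays exact. In terms of the vector names (with the magic bias already subtracted from vn and z = |x|), the identity being exploited is, up to the polynomial's accuracy:

\[
n = \operatorname{round}\bigl(z \log_2 e\bigr), \qquad
t = \bigl(z - n\,\mathrm{ln2\_hi}\bigr) - n\,\mathrm{ln2\_lo}, \qquad
e^{-z} = 2^{-n} e^{-t} \approx s\,\bigl(1 + t\,p(t)\bigr),
\]

where s = 2^{-n} is rebuilt by shifting the still-biased vn left by 23 bits so its integer part lands in the float exponent field, and p(t) is the degree-5 polynomial accumulated from c5 down to c1.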
| 8,881 | 42.326829 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmblendvps-rr2-p5-div-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmblendvps_rr2_p5_div_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
const v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_hi), vzCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_hi), vzGHIJ);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_lo), vtGHIJ);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vc5), vc4);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vc5), vc4);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vc5), vc4);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vc5), vc4);
v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vc5), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc3);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc2);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc1);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc1);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc1);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc1);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
const v128_t ve0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
const v128_t ve4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
const v128_t ve89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
const v128_t veCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vsCDEF);
const v128_t veGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vsGHIJ);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(veCDEF, vone);
const v128_t vdGHIJ = wasm_f32x4_add(veGHIJ, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(veCDEF, vdCDEF);
v128_t vfGHIJ = wasm_f32x4_div(veGHIJ, vdGHIJ);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
const v128_t vcfGHIJ = wasm_f32x4_sub(vone, vfGHIJ);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, vx0123);
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, vx4567);
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, vx89AB);
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, vxCDEF);
vfGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(vfGHIJ, vcfGHIJ, vxGHIJ);
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 10,082 | 44.013393 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmblendvps-rr2-p5-div-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmblendvps_rr2_p5_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
const v128_t vxGHIJ = wasm_v128_load(input + 16);
const v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
const v128_t vzKLMN = wasm_f32x4_abs(vxKLMN);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vminus_log2e), vmagic_bias);
v128_t vnKLMN = wasm_f32x4_add(wasm_f32x4_mul(vzKLMN, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
const v128_t vsKLMN = wasm_i32x4_shl(vnKLMN, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_hi), vzCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_hi), vzGHIJ);
v128_t vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vln2_hi), vzKLMN);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_lo), vtGHIJ);
vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vln2_lo), vtKLMN);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vc5), vc4);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vc5), vc4);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vc5), vc4);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vc5), vc4);
v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vc5), vc4);
v128_t vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vtKLMN, vc5), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc3);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vc3);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vtKLMN, vpKLMN), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc2);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vc2);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vtKLMN, vpKLMN), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc1);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc1);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc1);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc1);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vc1);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vtKLMN, vpKLMN), vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
const v128_t ve0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
const v128_t ve4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
const v128_t ve89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
const v128_t veCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vsCDEF);
const v128_t veGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vsGHIJ);
const v128_t veKLMN = wasm_f32x4_add(wasm_f32x4_mul(vtKLMN, vpKLMN), vsKLMN);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(veCDEF, vone);
const v128_t vdGHIJ = wasm_f32x4_add(veGHIJ, vone);
const v128_t vdKLMN = wasm_f32x4_add(veKLMN, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(veCDEF, vdCDEF);
v128_t vfGHIJ = wasm_f32x4_div(veGHIJ, vdGHIJ);
v128_t vfKLMN = wasm_f32x4_div(veKLMN, vdKLMN);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
vfKLMN = wasm_v128_andnot(vfKLMN, wasm_f32x4_gt(vzKLMN, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
const v128_t vcfGHIJ = wasm_f32x4_sub(vone, vfGHIJ);
const v128_t vcfKLMN = wasm_f32x4_sub(vone, vfKLMN);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, vx0123);
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, vx4567);
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, vx89AB);
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, vxCDEF);
vfGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(vfGHIJ, vcfGHIJ, vxGHIJ);
vfKLMN = __builtin_wasm_relaxed_laneselect_i32x4(vfKLMN, vcfKLMN, vxKLMN);
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
wasm_v128_store(output + 20, vfKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
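The x4 through x24 suffixes only change how many elements the primary loop consumes per iteration; the calling convention is identical across variants. A minimal driver might look like the sketch below, which assumes the parameter struct was filled in beforehand by the matching XNNPACK initialization routine (not shown here).

#include <stddef.h>
#include <xnnpack/vunary.h>  // declares the ukernels, as in the files above

// Hypothetical wrapper: `count` is an element count, while the ukernel's `batch`
// argument is a byte count that must be a non-zero multiple of sizeof(float),
// as the assertions at the top of every kernel require. `params` is assumed to
// have been initialized elsewhere for the wasmsimd_rr2_p5 scheme.
static void sigmoid_f32(const float* input, float* output, size_t count,
                        const union xnn_f32_sigmoid_params* params) {
  if (count != 0) {
    xnn_f32_vsigmoid_ukernel__wasmblendvps_rr2_p5_div_x24(
        count * sizeof(float), input, output, params);
  }
}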
| 11,283 | 45.436214 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmblendvps-rr2-p5-div-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmblendvps_rr2_p5_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 3,964 | 36.056075 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmblendvps-rr2-p5-div-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmblendvps_rr2_p5_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vc5), vc4);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vc5), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc1);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t ve0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
const v128_t ve4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, vx0123);
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, vx4567);
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
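The wasmrelaxedsimd lut64-p2 kernel that follows replaces the degree-5 polynomial with a 64-entry table of 2^(-j/64) plus a degree-2 correction. The step that is easiest to lose in the vector code is how the scale is rebuilt from the biased vn: the low 6 bits of its bit pattern index the table, and the bits above them are shifted left by 17 so they land in the float exponent field. The scalar sketch below is a hypothetical per-lane view of that step only, mirroring the vidx/ve/vs operations in the kernel.

#include <stdint.h>
#include <string.h>

extern const float xnn_table_exp2minus_k_over_64[64];

// Hypothetical per-lane view of vidx/ve/vs in the lut64 kernel: vn_bits is the
// raw 32-bit pattern of one lane of vn while the magic bias is still applied.
static inline float lut64_scale(uint32_t vn_bits) {
  const uint32_t idx = vn_bits & UINT32_C(0x3F);  // index mask, as an element index
  uint32_t bits;
  memcpy(&bits, &xnn_table_exp2minus_k_over_64[idx], sizeof(bits));  // bits of 2^(-idx/64)
  bits += vn_bits << 17;                          // ve = vn << 17; vs = vl + ve
  float s;
  memcpy(&s, &bits, sizeof(s));
  return s;                                       // 2^(-idx/64) scaled by the power-of-two part
}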
| 6,476 | 37.784431 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-fma-rr2-lut64-p2-div-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_fma_rr2_lut64_p2_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vminus_log2e, vmagic_bias);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 17);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx8));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxB), vl89AB, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
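    /* Integer addition of the exponent bits to the table entries' bit patterns
       scales each entry by the matching power of two, reconstructing the leading
       factor vs of e^-z without a floating-point multiply. */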
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_hi, vz89AB);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_lo, vt89AB);
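    /* t is the range-reduction remainder; the reduction constant is applied as a
       hi/lo pair to keep the rounding error of this step small. */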
v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);
v128_t vp89AB = wasm_f32x4_mul(vt89AB, vc2);
vp0123 = __builtin_wasm_relaxed_nmadd_f32x4(vp0123, vt0123, vt0123);
vp4567 = __builtin_wasm_relaxed_nmadd_f32x4(vp4567, vt4567, vt4567);
vp89AB = __builtin_wasm_relaxed_nmadd_f32x4(vp89AB, vt89AB, vt89AB);
const v128_t vy0123 = __builtin_wasm_relaxed_nmadd_f32x4(vs0123, vp0123, vs0123);
const v128_t vy4567 = __builtin_wasm_relaxed_nmadd_f32x4(vs4567, vp4567, vs4567);
const v128_t vy89AB = __builtin_wasm_relaxed_nmadd_f32x4(vs89AB, vp89AB, vs89AB);
const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);
const v128_t vd89AB = wasm_f32x4_add(vy89AB, vone);
v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(vy89AB, vd89AB);
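    /* f = y / (y + 1) = e^-z / (1 + e^-z) = sigmoid(-z) for every lane. */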
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
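    /* Once z exceeds the cutoff, e^-z underflows; those lanes are flushed to
       exactly 0 here, which the sign reconciliation below turns into exactly 1
       for large positive inputs. */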
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
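    /* Lanes with x >= 0 take the complement 1 - f, mapping sigmoid(-z) back to
       sigmoid(x); lanes with the sign bit set keep f unchanged. */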
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = __builtin_wasm_relaxed_nmadd_f32x4(vp, vt, vt);
const v128_t vy = __builtin_wasm_relaxed_nmadd_f32x4(vs, vp, vs);
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
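    /* 1-3 elements remain: a full 128-bit load may read past the end of the
       input, which the XNN_OOB_READS annotation on this kernel permits. */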
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = __builtin_wasm_relaxed_nmadd_f32x4(vp, vt, vt);
const v128_t vy = __builtin_wasm_relaxed_nmadd_f32x4(vs, vp, vs);
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
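    /* Store the surviving results lane by lane: a 64-bit store covers two
       elements when bit 1 of the remaining byte count is set, then a single
       32-bit lane handles the last element if needed. */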
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 11,255 | 47.517241 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-fma-rr2-lut64-p2-div-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_fma_rr2_lut64_p2_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vminus_log2e, vmagic_bias);
v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vzCDEF, vminus_log2e, vmagic_bias);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 17);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t veCDEF = wasm_i32x4_shl(vnCDEF, 17);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxC));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxF), vlCDEF, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
const v128_t vsCDEF = wasm_i32x4_add(vlCDEF, veCDEF);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_hi, vz89AB);
v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_hi, vzCDEF);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_lo, vt89AB);
vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_lo, vtCDEF);
v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);
v128_t vp89AB = wasm_f32x4_mul(vt89AB, vc2);
v128_t vpCDEF = wasm_f32x4_mul(vtCDEF, vc2);
vp0123 = __builtin_wasm_relaxed_nmadd_f32x4(vp0123, vt0123, vt0123);
vp4567 = __builtin_wasm_relaxed_nmadd_f32x4(vp4567, vt4567, vt4567);
vp89AB = __builtin_wasm_relaxed_nmadd_f32x4(vp89AB, vt89AB, vt89AB);
vpCDEF = __builtin_wasm_relaxed_nmadd_f32x4(vpCDEF, vtCDEF, vtCDEF);
const v128_t vy0123 = __builtin_wasm_relaxed_nmadd_f32x4(vs0123, vp0123, vs0123);
const v128_t vy4567 = __builtin_wasm_relaxed_nmadd_f32x4(vs4567, vp4567, vs4567);
const v128_t vy89AB = __builtin_wasm_relaxed_nmadd_f32x4(vs89AB, vp89AB, vs89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_nmadd_f32x4(vsCDEF, vpCDEF, vsCDEF);
const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);
const v128_t vd89AB = wasm_f32x4_add(vy89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(vyCDEF, vone);
v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(vy89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(vyCDEF, vdCDEF);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = __builtin_wasm_relaxed_nmadd_f32x4(vp, vt, vt);
const v128_t vy = __builtin_wasm_relaxed_nmadd_f32x4(vs, vp, vs);
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = __builtin_wasm_relaxed_nmadd_f32x4(vp, vt, vt);
const v128_t vy = __builtin_wasm_relaxed_nmadd_f32x4(vs, vp, vs);
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 13,195 | 50.147287 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-fma-rr2-lut64-p2-div-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_fma_rr2_lut64_p2_div_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
const v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vminus_log2e, vmagic_bias);
v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vzCDEF, vminus_log2e, vmagic_bias);
v128_t vnGHIJ = __builtin_wasm_relaxed_madd_f32x4(vzGHIJ, vminus_log2e, vmagic_bias);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 17);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t veCDEF = wasm_i32x4_shl(vnCDEF, 17);
const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
const v128_t veGHIJ = wasm_i32x4_shl(vnGHIJ, 17);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxC));
const uint32_t vidxG = wasm_u32x4_extract_lane(vidxGHIJ, 0);
v128_t vlGHIJ = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxG));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidxH = wasm_u32x4_extract_lane(vidxGHIJ, 1);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxH), vlGHIJ, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidxI = wasm_u32x4_extract_lane(vidxGHIJ, 2);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxI), vlGHIJ, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxF), vlCDEF, 3);
const uint32_t vidxJ = wasm_u32x4_extract_lane(vidxGHIJ, 3);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxJ), vlGHIJ, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
const v128_t vsCDEF = wasm_i32x4_add(vlCDEF, veCDEF);
const v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, veGHIJ);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_hi, vz89AB);
v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_hi, vzCDEF);
v128_t vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vln2_hi, vzGHIJ);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_lo, vt89AB);
vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_lo, vtCDEF);
vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vln2_lo, vtGHIJ);
v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);
v128_t vp89AB = wasm_f32x4_mul(vt89AB, vc2);
v128_t vpCDEF = wasm_f32x4_mul(vtCDEF, vc2);
v128_t vpGHIJ = wasm_f32x4_mul(vtGHIJ, vc2);
vp0123 = __builtin_wasm_relaxed_nmadd_f32x4(vp0123, vt0123, vt0123);
vp4567 = __builtin_wasm_relaxed_nmadd_f32x4(vp4567, vt4567, vt4567);
vp89AB = __builtin_wasm_relaxed_nmadd_f32x4(vp89AB, vt89AB, vt89AB);
vpCDEF = __builtin_wasm_relaxed_nmadd_f32x4(vpCDEF, vtCDEF, vtCDEF);
vpGHIJ = __builtin_wasm_relaxed_nmadd_f32x4(vpGHIJ, vtGHIJ, vtGHIJ);
const v128_t vy0123 = __builtin_wasm_relaxed_nmadd_f32x4(vs0123, vp0123, vs0123);
const v128_t vy4567 = __builtin_wasm_relaxed_nmadd_f32x4(vs4567, vp4567, vs4567);
const v128_t vy89AB = __builtin_wasm_relaxed_nmadd_f32x4(vs89AB, vp89AB, vs89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_nmadd_f32x4(vsCDEF, vpCDEF, vsCDEF);
const v128_t vyGHIJ = __builtin_wasm_relaxed_nmadd_f32x4(vsGHIJ, vpGHIJ, vsGHIJ);
const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);
const v128_t vd89AB = wasm_f32x4_add(vy89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(vyCDEF, vone);
const v128_t vdGHIJ = wasm_f32x4_add(vyGHIJ, vone);
v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(vy89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(vyCDEF, vdCDEF);
v128_t vfGHIJ = wasm_f32x4_div(vyGHIJ, vdGHIJ);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
const v128_t vcfGHIJ = wasm_f32x4_sub(vone, vfGHIJ);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
vfGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(vfGHIJ, vcfGHIJ, wasm_i32x4_shr(vxGHIJ, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = __builtin_wasm_relaxed_nmadd_f32x4(vp, vt, vt);
const v128_t vy = __builtin_wasm_relaxed_nmadd_f32x4(vs, vp, vs);
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = __builtin_wasm_relaxed_nmadd_f32x4(vp, vt, vt);
const v128_t vy = __builtin_wasm_relaxed_nmadd_f32x4(vs, vp, vs);
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 15,135 | 52.295775 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-fma-rr2-lut64-p2-div-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_fma_rr2_lut64_p2_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
const v128_t vxGHIJ = wasm_v128_load(input + 16);
const v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
const v128_t vzKLMN = wasm_f32x4_abs(vxKLMN);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vminus_log2e, vmagic_bias);
v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vzCDEF, vminus_log2e, vmagic_bias);
v128_t vnGHIJ = __builtin_wasm_relaxed_madd_f32x4(vzGHIJ, vminus_log2e, vmagic_bias);
v128_t vnKLMN = __builtin_wasm_relaxed_madd_f32x4(vzKLMN, vminus_log2e, vmagic_bias);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 17);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t veCDEF = wasm_i32x4_shl(vnCDEF, 17);
const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
const v128_t veGHIJ = wasm_i32x4_shl(vnGHIJ, 17);
const v128_t vidxKLMN = wasm_i32x4_shl(wasm_v128_and(vnKLMN, vindex_mask), 2);
const v128_t veKLMN = wasm_i32x4_shl(vnKLMN, 17);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxC));
const uint32_t vidxG = wasm_u32x4_extract_lane(vidxGHIJ, 0);
v128_t vlGHIJ = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxG));
const uint32_t vidxK = wasm_u32x4_extract_lane(vidxKLMN, 0);
v128_t vlKLMN = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxK));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidxH = wasm_u32x4_extract_lane(vidxGHIJ, 1);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxH), vlGHIJ, 1);
const uint32_t vidxL = wasm_u32x4_extract_lane(vidxKLMN, 1);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxL), vlKLMN, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidxI = wasm_u32x4_extract_lane(vidxGHIJ, 2);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxI), vlGHIJ, 2);
const uint32_t vidxM = wasm_u32x4_extract_lane(vidxKLMN, 2);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxM), vlKLMN, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxF), vlCDEF, 3);
const uint32_t vidxJ = wasm_u32x4_extract_lane(vidxGHIJ, 3);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxJ), vlGHIJ, 3);
const uint32_t vidxN = wasm_u32x4_extract_lane(vidxKLMN, 3);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxN), vlKLMN, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
const v128_t vsCDEF = wasm_i32x4_add(vlCDEF, veCDEF);
const v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, veGHIJ);
const v128_t vsKLMN = wasm_i32x4_add(vlKLMN, veKLMN);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_hi, vz89AB);
v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_hi, vzCDEF);
v128_t vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vln2_hi, vzGHIJ);
v128_t vtKLMN = __builtin_wasm_relaxed_madd_f32x4(vnKLMN, vln2_hi, vzKLMN);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_lo, vt89AB);
vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_lo, vtCDEF);
vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vln2_lo, vtGHIJ);
vtKLMN = __builtin_wasm_relaxed_madd_f32x4(vnKLMN, vln2_lo, vtKLMN);
v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);
v128_t vp89AB = wasm_f32x4_mul(vt89AB, vc2);
v128_t vpCDEF = wasm_f32x4_mul(vtCDEF, vc2);
v128_t vpGHIJ = wasm_f32x4_mul(vtGHIJ, vc2);
v128_t vpKLMN = wasm_f32x4_mul(vtKLMN, vc2);
vp0123 = __builtin_wasm_relaxed_nmadd_f32x4(vp0123, vt0123, vt0123);
vp4567 = __builtin_wasm_relaxed_nmadd_f32x4(vp4567, vt4567, vt4567);
vp89AB = __builtin_wasm_relaxed_nmadd_f32x4(vp89AB, vt89AB, vt89AB);
vpCDEF = __builtin_wasm_relaxed_nmadd_f32x4(vpCDEF, vtCDEF, vtCDEF);
vpGHIJ = __builtin_wasm_relaxed_nmadd_f32x4(vpGHIJ, vtGHIJ, vtGHIJ);
vpKLMN = __builtin_wasm_relaxed_nmadd_f32x4(vpKLMN, vtKLMN, vtKLMN);
const v128_t vy0123 = __builtin_wasm_relaxed_nmadd_f32x4(vs0123, vp0123, vs0123);
const v128_t vy4567 = __builtin_wasm_relaxed_nmadd_f32x4(vs4567, vp4567, vs4567);
const v128_t vy89AB = __builtin_wasm_relaxed_nmadd_f32x4(vs89AB, vp89AB, vs89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_nmadd_f32x4(vsCDEF, vpCDEF, vsCDEF);
const v128_t vyGHIJ = __builtin_wasm_relaxed_nmadd_f32x4(vsGHIJ, vpGHIJ, vsGHIJ);
const v128_t vyKLMN = __builtin_wasm_relaxed_nmadd_f32x4(vsKLMN, vpKLMN, vsKLMN);
const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);
const v128_t vd89AB = wasm_f32x4_add(vy89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(vyCDEF, vone);
const v128_t vdGHIJ = wasm_f32x4_add(vyGHIJ, vone);
const v128_t vdKLMN = wasm_f32x4_add(vyKLMN, vone);
v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(vy89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(vyCDEF, vdCDEF);
v128_t vfGHIJ = wasm_f32x4_div(vyGHIJ, vdGHIJ);
v128_t vfKLMN = wasm_f32x4_div(vyKLMN, vdKLMN);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
vfKLMN = wasm_v128_andnot(vfKLMN, wasm_f32x4_gt(vzKLMN, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
const v128_t vcfGHIJ = wasm_f32x4_sub(vone, vfGHIJ);
const v128_t vcfKLMN = wasm_f32x4_sub(vone, vfKLMN);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
vfGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(vfGHIJ, vcfGHIJ, wasm_i32x4_shr(vxGHIJ, 31));
vfKLMN = __builtin_wasm_relaxed_laneselect_i32x4(vfKLMN, vcfKLMN, wasm_i32x4_shr(vxKLMN, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
wasm_v128_store(output + 20, vfKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = __builtin_wasm_relaxed_nmadd_f32x4(vp, vt, vt);
const v128_t vy = __builtin_wasm_relaxed_nmadd_f32x4(vs, vp, vs);
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = __builtin_wasm_relaxed_nmadd_f32x4(vp, vt, vt);
const v128_t vy = __builtin_wasm_relaxed_nmadd_f32x4(vs, vp, vs);
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 17,075 | 54.083871 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-fma-rr2-lut64-p2-div-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_fma_rr2_lut64_p2_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = __builtin_wasm_relaxed_nmadd_f32x4(vp, vt, vt);
const v128_t vy = __builtin_wasm_relaxed_nmadd_f32x4(vs, vp, vs);
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = __builtin_wasm_relaxed_nmadd_f32x4(vp, vt, vt);
const v128_t vy = __builtin_wasm_relaxed_nmadd_f32x4(vs, vp, vs);
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 5,322 | 39.325758 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-fma-rr2-lut64-p2-div-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
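// Evaluation scheme of the kernel below (a summary of the code): with z = |x|, the
// magic-bias trick rounds -z*log2(e) to a multiple of 1/64; the low 6 bits of that
// fixed-point value select an entry of the 64-entry 2**(-k/64) table, and the
// remaining bits, shifted into the exponent field (<< 17) and added as integers,
// adjust that entry's exponent so that vs reconstructs 2**n for the rounded value n.
// The residual vt = z + n*ln2, accumulated as ln2_hi + ln2_lo, is then at most
// ln2/128 in magnitude, and exp(-z) == vs*exp(-vt) is approximated by a degree-2
// polynomial in vt. sigmoid(x) is finally vy/(vy + 1) for negative x and
// 1 - vy/(vy + 1) otherwise, flushed to zero once z exceeds the denormal cutoff.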
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_fma_rr2_lut64_p2_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx4));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx7), vl4567, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);
vp0123 = __builtin_wasm_relaxed_nmadd_f32x4(vp0123, vt0123, vt0123);
vp4567 = __builtin_wasm_relaxed_nmadd_f32x4(vp4567, vt4567, vt4567);
const v128_t vy0123 = __builtin_wasm_relaxed_nmadd_f32x4(vs0123, vp0123, vs0123);
const v128_t vy4567 = __builtin_wasm_relaxed_nmadd_f32x4(vs4567, vp4567, vs4567);
const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);
v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = __builtin_wasm_relaxed_nmadd_f32x4(vp, vt, vt);
const v128_t vy = __builtin_wasm_relaxed_nmadd_f32x4(vs, vp, vs);
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = __builtin_wasm_relaxed_nmadd_f32x4(vp, vt, vt);
const v128_t vy = __builtin_wasm_relaxed_nmadd_f32x4(vs, vp, vs);
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 9,312 | 44.208738 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-fma-rr2-p5-div-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
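// The kernels in this file follow an RR2 + degree-5 polynomial scheme: with z = |x|,
// the magic-bias trick yields the integer n = round(-z*log2(e)) and s = 2**n, the
// reduced argument t = z + n*ln(2) is accumulated in two pieces (ln2_hi, ln2_lo), and
// exp(-z) is reconstructed as s*(1 + t*p(t)) with a degree-5 polynomial whose
// coefficients c1..c5 come from params. sigmoid(x) is then e/(e + 1) for negative x
// and 1 - e/(e + 1) otherwise, flushed to zero once z exceeds the denormal cutoff.
//
// A minimal scalar sketch of the same structure, for illustration only. Assumptions:
// the name ref_sigmoidf is introduced here and is not part of XNNPACK, and libm expf
// stands in for the degree-5 polynomial on the reduced argument, so the sketch is not
// bit-identical to the vector kernels below.
#include <math.h>
static inline float ref_sigmoidf(float x) {
  const float z = fabsf(x);                    // reduce to sigmoid(-z) or 1 - sigmoid(-z)
  const float n = rintf(z * -0x1.715476p+0f);  // n = round(-z * log2(e)), n <= 0
  const float t = fmaf(n, 0x1.62E43p-1f, z);   // reduced argument t = z + n*ln(2)
  const float e = ldexpf(expf(-t), (int) n);   // e = 2**n * exp(-t) = exp(-|x|)
  const float f = e / (e + 1.0f);              // sigmoid(-|x|)
  return signbit(x) ? f : 1.0f - f;            // reflect the result for x >= 0
}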
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_fma_rr2_p5_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vminus_log2e, vmagic_bias);
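    // Thanks to the magic bias, the low bits of each vn encode the biased exponent of
    // 2**n, so shifting left by 23 reconstructs vs = 2**n without a float->int convert.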
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_hi, vz89AB);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_lo, vt89AB);
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vc5, vc4);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vc5, vc4);
v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vc5, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc3);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc2);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc2);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc1);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc1);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t ve0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
const v128_t ve4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
const v128_t ve89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 7,907 | 41.516129 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-fma-rr2-p5-div-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_fma_rr2_p5_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vminus_log2e, vmagic_bias);
v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vzCDEF, vminus_log2e, vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_hi, vz89AB);
v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_hi, vzCDEF);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_lo, vt89AB);
vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_lo, vtCDEF);
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vc5, vc4);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vc5, vc4);
v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vc5, vc4);
v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vc5, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc3);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc3);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc2);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc2);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc2);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc1);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc1);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc1);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
const v128_t ve0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
const v128_t ve4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
const v128_t ve89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
const v128_t veCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vsCDEF);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(veCDEF, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(veCDEF, vdCDEF);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 9,152 | 43.64878 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-fma-rr2-p5-div-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_fma_rr2_p5_div_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
const v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vminus_log2e, vmagic_bias);
v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vzCDEF, vminus_log2e, vmagic_bias);
v128_t vnGHIJ = __builtin_wasm_relaxed_madd_f32x4(vzGHIJ, vminus_log2e, vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_hi, vz89AB);
v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_hi, vzCDEF);
v128_t vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vln2_hi, vzGHIJ);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_lo, vt89AB);
vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_lo, vtCDEF);
vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vln2_lo, vtGHIJ);
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vc5, vc4);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vc5, vc4);
v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vc5, vc4);
v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vc5, vc4);
v128_t vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vc5, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc3);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc3);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc3);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc2);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc2);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc2);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vc2);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc1);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc1);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc1);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc1);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
const v128_t ve0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
const v128_t ve4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
const v128_t ve89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
const v128_t veCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vsCDEF);
const v128_t veGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vsGHIJ);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(veCDEF, vone);
const v128_t vdGHIJ = wasm_f32x4_add(veGHIJ, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(veCDEF, vdCDEF);
v128_t vfGHIJ = wasm_f32x4_div(veGHIJ, vdGHIJ);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
const v128_t vcfGHIJ = wasm_f32x4_sub(vone, vfGHIJ);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
vfGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(vfGHIJ, vcfGHIJ, wasm_i32x4_shr(vxGHIJ, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 10,397 | 45.419643 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-fma-rr2-p5-div-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_fma_rr2_p5_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
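  // Main loop: 24 elements (six v128 vectors) per iteration; remaining elements fall
  // through to the 4-element loop and the partial-store tail below.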
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
const v128_t vxGHIJ = wasm_v128_load(input + 16);
const v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
const v128_t vzKLMN = wasm_f32x4_abs(vxKLMN);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vminus_log2e, vmagic_bias);
v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vzCDEF, vminus_log2e, vmagic_bias);
v128_t vnGHIJ = __builtin_wasm_relaxed_madd_f32x4(vzGHIJ, vminus_log2e, vmagic_bias);
v128_t vnKLMN = __builtin_wasm_relaxed_madd_f32x4(vzKLMN, vminus_log2e, vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
const v128_t vsKLMN = wasm_i32x4_shl(vnKLMN, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_hi, vz89AB);
v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_hi, vzCDEF);
v128_t vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vln2_hi, vzGHIJ);
v128_t vtKLMN = __builtin_wasm_relaxed_madd_f32x4(vnKLMN, vln2_hi, vzKLMN);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vln2_lo, vt89AB);
vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vln2_lo, vtCDEF);
vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vln2_lo, vtGHIJ);
vtKLMN = __builtin_wasm_relaxed_madd_f32x4(vnKLMN, vln2_lo, vtKLMN);
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vc5, vc4);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vc5, vc4);
v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vc5, vc4);
v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vc5, vc4);
v128_t vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vc5, vc4);
v128_t vpKLMN = __builtin_wasm_relaxed_madd_f32x4(vtKLMN, vc5, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc3);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc3);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc3);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vc3);
vpKLMN = __builtin_wasm_relaxed_madd_f32x4(vtKLMN, vpKLMN, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc2);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc2);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc2);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vc2);
vpKLMN = __builtin_wasm_relaxed_madd_f32x4(vtKLMN, vpKLMN, vc2);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc1);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc1);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vc1);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vc1);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vc1);
vpKLMN = __builtin_wasm_relaxed_madd_f32x4(vtKLMN, vpKLMN, vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
const v128_t ve0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
const v128_t ve4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
const v128_t ve89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
const v128_t veCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vsCDEF);
const v128_t veGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vsGHIJ);
const v128_t veKLMN = __builtin_wasm_relaxed_madd_f32x4(vtKLMN, vpKLMN, vsKLMN);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(veCDEF, vone);
const v128_t vdGHIJ = wasm_f32x4_add(veGHIJ, vone);
const v128_t vdKLMN = wasm_f32x4_add(veKLMN, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(veCDEF, vdCDEF);
v128_t vfGHIJ = wasm_f32x4_div(veGHIJ, vdGHIJ);
v128_t vfKLMN = wasm_f32x4_div(veKLMN, vdKLMN);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
vfKLMN = wasm_v128_andnot(vfKLMN, wasm_f32x4_gt(vzKLMN, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
const v128_t vcfGHIJ = wasm_f32x4_sub(vone, vfGHIJ);
const v128_t vcfKLMN = wasm_f32x4_sub(vone, vfKLMN);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
vfGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(vfGHIJ, vcfGHIJ, wasm_i32x4_shr(vxGHIJ, 31));
vfKLMN = __builtin_wasm_relaxed_laneselect_i32x4(vfKLMN, vcfKLMN, wasm_i32x4_shr(vxKLMN, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
wasm_v128_store(output + 20, vfKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 11,642 | 46.91358 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-fma-rr2-p5-div-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_fma_rr2_p5_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 4,059 | 36.943925 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-fma-rr2-p5-div-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_fma_rr2_p5_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vminus_log2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vminus_log2e, vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_hi, vz4567);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vln2_lo, vt4567);
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vc5, vc4);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vc5, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc2);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vc1);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t ve0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
const v128_t ve4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vminus_log2e, vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vt, vc5, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc2);
vp = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 6,659 | 38.88024 | 98 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-rr2-lut64-p2-div-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
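// Note: unlike the wasmrelaxedsimd-fma kernels above, this variant does not use the
// relaxed fused multiply-add; the multiply-add steps below are explicit
// wasm_f32x4_add(wasm_f32x4_mul(...), ...) pairs, so each step rounds separately.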
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_rr2_lut64_p2_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
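  // Algorithm sketch: with z = |x|, y ~= exp(-z) is split as s * exp(-t), where s = 2^n is rebuilt
  // from the 64-entry table of 2^(-k/64) plus exponent bits and exp(-t) ~= 1 - p with a degree-2
  // polynomial in the reduced argument t; sigmoid is then y / (y + 1), complemented to 1 - f for
  // non-negative x.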
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 17);
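    // The low 6 bits of n select a table entry (shifted by 2 into a byte offset); the remaining
    // integer bits are shifted into the float exponent field to rebuild 2^n later.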
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx8));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxB), vl89AB, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);
v128_t vp89AB = wasm_f32x4_mul(vt89AB, vc2);
vp0123 = wasm_f32x4_sub(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_sub(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_sub(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
const v128_t vy0123 = wasm_f32x4_sub(vs0123, wasm_f32x4_mul(vs0123, vp0123));
const v128_t vy4567 = wasm_f32x4_sub(vs4567, wasm_f32x4_mul(vs4567, vp4567));
const v128_t vy89AB = wasm_f32x4_sub(vs89AB, wasm_f32x4_mul(vs89AB, vp89AB));
const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);
const v128_t vd89AB = wasm_f32x4_add(vy89AB, vone);
v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(vy89AB, vd89AB);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
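    // Only 1 to 3 elements remain here; write them out with 64-bit and 32-bit partial lane stores.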
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 11,166 | 47.133621 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-rr2-lut64-p2-div-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_rr2_lut64_p2_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 17);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t veCDEF = wasm_i32x4_shl(vnCDEF, 17);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxC));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxF), vlCDEF, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
const v128_t vsCDEF = wasm_i32x4_add(vlCDEF, veCDEF);
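    // s ~= 2^n: the exponent bits are added to the table entries in the integer domain.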
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_hi), vzCDEF);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_lo), vtCDEF);
v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);
v128_t vp89AB = wasm_f32x4_mul(vt89AB, vc2);
v128_t vpCDEF = wasm_f32x4_mul(vtCDEF, vc2);
vp0123 = wasm_f32x4_sub(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_sub(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_sub(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_sub(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
const v128_t vy0123 = wasm_f32x4_sub(vs0123, wasm_f32x4_mul(vs0123, vp0123));
const v128_t vy4567 = wasm_f32x4_sub(vs4567, wasm_f32x4_mul(vs4567, vp4567));
const v128_t vy89AB = wasm_f32x4_sub(vs89AB, wasm_f32x4_mul(vs89AB, vp89AB));
const v128_t vyCDEF = wasm_f32x4_sub(vsCDEF, wasm_f32x4_mul(vsCDEF, vpCDEF));
const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);
const v128_t vd89AB = wasm_f32x4_add(vy89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(vyCDEF, vone);
v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(vy89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(vyCDEF, vdCDEF);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
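    // t = |x| + n*ln2 is the reduced argument, accumulated with split hi/lo parts of ln2 for extra precision.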
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 13,089 | 49.736434 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-rr2-lut64-p2-div-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_rr2_lut64_p2_div_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
const v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vminus_log2e), vmagic_bias);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 17);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t veCDEF = wasm_i32x4_shl(vnCDEF, 17);
const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
const v128_t veGHIJ = wasm_i32x4_shl(vnGHIJ, 17);
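    // WAsm SIMD has no gather instruction, so the 20 table entries are fetched one lane at a time below.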
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxC));
const uint32_t vidxG = wasm_u32x4_extract_lane(vidxGHIJ, 0);
v128_t vlGHIJ = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxG));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidxH = wasm_u32x4_extract_lane(vidxGHIJ, 1);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxH), vlGHIJ, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidxI = wasm_u32x4_extract_lane(vidxGHIJ, 2);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxI), vlGHIJ, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxF), vlCDEF, 3);
const uint32_t vidxJ = wasm_u32x4_extract_lane(vidxGHIJ, 3);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxJ), vlGHIJ, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
const v128_t vsCDEF = wasm_i32x4_add(vlCDEF, veCDEF);
const v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, veGHIJ);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_hi), vzCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_hi), vzGHIJ);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_lo), vtGHIJ);
v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);
v128_t vp89AB = wasm_f32x4_mul(vt89AB, vc2);
v128_t vpCDEF = wasm_f32x4_mul(vtCDEF, vc2);
v128_t vpGHIJ = wasm_f32x4_mul(vtGHIJ, vc2);
vp0123 = wasm_f32x4_sub(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_sub(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_sub(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_sub(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
vpGHIJ = wasm_f32x4_sub(vtGHIJ, wasm_f32x4_mul(vpGHIJ, vtGHIJ));
const v128_t vy0123 = wasm_f32x4_sub(vs0123, wasm_f32x4_mul(vs0123, vp0123));
const v128_t vy4567 = wasm_f32x4_sub(vs4567, wasm_f32x4_mul(vs4567, vp4567));
const v128_t vy89AB = wasm_f32x4_sub(vs89AB, wasm_f32x4_mul(vs89AB, vp89AB));
const v128_t vyCDEF = wasm_f32x4_sub(vsCDEF, wasm_f32x4_mul(vsCDEF, vpCDEF));
const v128_t vyGHIJ = wasm_f32x4_sub(vsGHIJ, wasm_f32x4_mul(vsGHIJ, vpGHIJ));
const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);
const v128_t vd89AB = wasm_f32x4_add(vy89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(vyCDEF, vone);
const v128_t vdGHIJ = wasm_f32x4_add(vyGHIJ, vone);
v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(vy89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(vyCDEF, vdCDEF);
v128_t vfGHIJ = wasm_f32x4_div(vyGHIJ, vdGHIJ);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
const v128_t vcfGHIJ = wasm_f32x4_sub(vone, vfGHIJ);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
vfGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(vfGHIJ, vcfGHIJ, wasm_i32x4_shr(vxGHIJ, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 15,012 | 51.862676 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-rr2-lut64-p2-div-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_rr2_lut64_p2_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
const v128_t vxGHIJ = wasm_v128_load(input + 16);
const v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
const v128_t vzKLMN = wasm_f32x4_abs(vxKLMN);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vminus_log2e), vmagic_bias);
v128_t vnKLMN = wasm_f32x4_add(wasm_f32x4_mul(vzKLMN, vminus_log2e), vmagic_bias);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 17);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t veCDEF = wasm_i32x4_shl(vnCDEF, 17);
const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
const v128_t veGHIJ = wasm_i32x4_shl(vnGHIJ, 17);
const v128_t vidxKLMN = wasm_i32x4_shl(wasm_v128_and(vnKLMN, vindex_mask), 2);
const v128_t veKLMN = wasm_i32x4_shl(vnKLMN, 17);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxC));
const uint32_t vidxG = wasm_u32x4_extract_lane(vidxGHIJ, 0);
v128_t vlGHIJ = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxG));
const uint32_t vidxK = wasm_u32x4_extract_lane(vidxKLMN, 0);
v128_t vlKLMN = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxK));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidxH = wasm_u32x4_extract_lane(vidxGHIJ, 1);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxH), vlGHIJ, 1);
const uint32_t vidxL = wasm_u32x4_extract_lane(vidxKLMN, 1);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxL), vlKLMN, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidxI = wasm_u32x4_extract_lane(vidxGHIJ, 2);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxI), vlGHIJ, 2);
const uint32_t vidxM = wasm_u32x4_extract_lane(vidxKLMN, 2);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxM), vlKLMN, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxF), vlCDEF, 3);
const uint32_t vidxJ = wasm_u32x4_extract_lane(vidxGHIJ, 3);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxJ), vlGHIJ, 3);
const uint32_t vidxN = wasm_u32x4_extract_lane(vidxKLMN, 3);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxN), vlKLMN, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
const v128_t vsCDEF = wasm_i32x4_add(vlCDEF, veCDEF);
const v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, veGHIJ);
const v128_t vsKLMN = wasm_i32x4_add(vlKLMN, veKLMN);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_hi), vzCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_hi), vzGHIJ);
v128_t vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vln2_hi), vzKLMN);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_lo), vtGHIJ);
vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vln2_lo), vtKLMN);
v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);
v128_t vp89AB = wasm_f32x4_mul(vt89AB, vc2);
v128_t vpCDEF = wasm_f32x4_mul(vtCDEF, vc2);
v128_t vpGHIJ = wasm_f32x4_mul(vtGHIJ, vc2);
v128_t vpKLMN = wasm_f32x4_mul(vtKLMN, vc2);
vp0123 = wasm_f32x4_sub(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_sub(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_sub(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_sub(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
vpGHIJ = wasm_f32x4_sub(vtGHIJ, wasm_f32x4_mul(vpGHIJ, vtGHIJ));
vpKLMN = wasm_f32x4_sub(vtKLMN, wasm_f32x4_mul(vpKLMN, vtKLMN));
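    // p = t - c2*t^2 approximates 1 - exp(-t), so y = s*(1 - p) below reconstructs 2^n * exp(-t) ~= exp(-|x|).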
const v128_t vy0123 = wasm_f32x4_sub(vs0123, wasm_f32x4_mul(vs0123, vp0123));
const v128_t vy4567 = wasm_f32x4_sub(vs4567, wasm_f32x4_mul(vs4567, vp4567));
const v128_t vy89AB = wasm_f32x4_sub(vs89AB, wasm_f32x4_mul(vs89AB, vp89AB));
const v128_t vyCDEF = wasm_f32x4_sub(vsCDEF, wasm_f32x4_mul(vsCDEF, vpCDEF));
const v128_t vyGHIJ = wasm_f32x4_sub(vsGHIJ, wasm_f32x4_mul(vsGHIJ, vpGHIJ));
const v128_t vyKLMN = wasm_f32x4_sub(vsKLMN, wasm_f32x4_mul(vsKLMN, vpKLMN));
const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);
const v128_t vd89AB = wasm_f32x4_add(vy89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(vyCDEF, vone);
const v128_t vdGHIJ = wasm_f32x4_add(vyGHIJ, vone);
const v128_t vdKLMN = wasm_f32x4_add(vyKLMN, vone);
v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(vy89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(vyCDEF, vdCDEF);
v128_t vfGHIJ = wasm_f32x4_div(vyGHIJ, vdGHIJ);
v128_t vfKLMN = wasm_f32x4_div(vyKLMN, vdKLMN);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
vfKLMN = wasm_v128_andnot(vfKLMN, wasm_f32x4_gt(vzKLMN, vdenorm_cutoff));
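    // Beyond the cutoff exp(-|x|) underflows, so f is flushed to 0 here; after the sign selection
    // below this yields 0 for large negative x and 1 - 0 = 1 for large positive x.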
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
const v128_t vcfGHIJ = wasm_f32x4_sub(vone, vfGHIJ);
const v128_t vcfKLMN = wasm_f32x4_sub(vone, vfKLMN);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
vfGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(vfGHIJ, vcfGHIJ, wasm_i32x4_shr(vxGHIJ, 31));
vfKLMN = __builtin_wasm_relaxed_laneselect_i32x4(vfKLMN, vcfKLMN, wasm_i32x4_shr(vxKLMN, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
wasm_v128_store(output + 20, vfKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 16,935 | 53.632258 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-rr2-lut64-p2-div-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_rr2_lut64_p2_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
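    // For x < 0, sigmoid(x) equals f as computed; for x >= 0 it is the complement 1 - f,
    // selected below by the sign-bit mask (x >> 31).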
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 5,284 | 39.037879 | 116 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-rr2-lut64-p2-div-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_rr2_lut64_p2_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx4));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx7), vl4567, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);
vp0123 = wasm_f32x4_sub(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_sub(vt4567, wasm_f32x4_mul(vp4567, vt4567));
const v128_t vy0123 = wasm_f32x4_sub(vs0123, wasm_f32x4_mul(vs0123, vp0123));
const v128_t vy4567 = wasm_f32x4_sub(vs4567, wasm_f32x4_mul(vs4567, vp4567));
const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);
v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
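// --- Illustrative note (not part of the XNNPACK source above) ---------------
// A minimal scalar sketch of the same rr2-lut64-p2 recipe, written with libm
// calls instead of the 64-entry table and SIMD exponent reconstruction.
// Constants are rounded and the denormal cutoff value is an assumption; the
// point is only to show the math the kernel vectorizes:
//   sigmoid(x) = f for x < 0 and 1 - f otherwise, with f = e / (e + 1) and
//   e ~= exp(-|x|) rebuilt as 2^n * (1 - t + t^2/2).
#include <math.h>
static float sigmoid_rr2_lut64_p2_sketch(float x) {
  const float z = fabsf(x);
  // n = -z/ln(2), rounded to the nearest 1/64 (the table granularity).
  const float n = nearbyintf(z * -92.332482f) * 0.015625f;
  const float s = exp2f(n);             // kernel: exponent bits + table lookup
  const float t = n * 0.69314718f + z;  // reduced argument, |t| <= ln(2)/128
  const float p = t - 0.5f * t * t;     // degree-2 correction, as in the kernel
  const float e = s - s * p;            // e ~= exp(-z)
  float f = e / (e + 1.0f);             // sigmoid(-z)
  if (z > 87.33655f) f = 0.0f;          // assumed denormal cutoff
  return x < 0.0f ? f : 1.0f - f;
}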
| 9,240 | 43.859223 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-rr2-p5-div-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_rr2_p5_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vc5), vc4);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vc5), vc4);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vc5), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc1);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc1);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
const v128_t ve0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
const v128_t ve4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
const v128_t ve89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
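// --- Illustrative note (not part of the XNNPACK source above) ---------------
// The rr2-p5 kernels drop the lookup table and absorb all of exp(-t) into a
// degree-5 polynomial evaluated by Horner's rule. A scalar sketch with plain
// Taylor coefficients (the generated kernels use minimax-fitted constants that
// are only close to these, so this is an approximation of an approximation):
#include <math.h>
static float sigmoid_rr2_p5_sketch(float x) {
  const float z = fabsf(x);
  const float n = nearbyintf(z * -1.442695f);  // round(-z / ln 2)
  const float s = exp2f(n);                    // kernel: (n + magic bias) << 23
  const float t = n * 0.69314718f + z;         // reduced argument
  // p(t) ~= (exp(-t) - 1) / t, same Horner ordering as the kernel.
  const float p = ((((-1.0f/120.0f * t + 1.0f/24.0f) * t - 1.0f/6.0f) * t + 0.5f) * t - 1.0f);
  const float e = s + (t * s) * p;             // e ~= exp(-z)
  float f = e / (e + 1.0f);
  if (z > 87.33655f) f = 0.0f;                 // assumed denormal cutoff
  return x < 0.0f ? f : 1.0f - f;
}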
| 7,783 | 40.849462 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-rr2-p5-div-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_rr2_p5_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_hi), vzCDEF);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_lo), vtCDEF);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vc5), vc4);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vc5), vc4);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vc5), vc4);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vc5), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc1);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc1);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc1);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
const v128_t ve0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
const v128_t ve4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
const v128_t ve89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
const v128_t veCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vsCDEF);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(veCDEF, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(veCDEF, vdCDEF);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 9,004 | 42.926829 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-rr2-p5-div-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_rr2_p5_div_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
const v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_hi), vzCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_hi), vzGHIJ);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_lo), vtGHIJ);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vc5), vc4);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vc5), vc4);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vc5), vc4);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vc5), vc4);
v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vc5), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc3);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc2);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc1);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc1);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc1);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc1);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
const v128_t ve0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
const v128_t ve4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
const v128_t ve89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
const v128_t veCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vsCDEF);
const v128_t veGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vsGHIJ);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(veCDEF, vone);
const v128_t vdGHIJ = wasm_f32x4_add(veGHIJ, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(veCDEF, vdCDEF);
v128_t vfGHIJ = wasm_f32x4_div(veGHIJ, vdGHIJ);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
const v128_t vcfGHIJ = wasm_f32x4_sub(vone, vfGHIJ);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
vfGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(vfGHIJ, vcfGHIJ, wasm_i32x4_shr(vxGHIJ, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 10,225 | 44.651786 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-rr2-p5-div-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_rr2_p5_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
const v128_t vxGHIJ = wasm_v128_load(input + 16);
const v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
const v128_t vzKLMN = wasm_f32x4_abs(vxKLMN);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vminus_log2e), vmagic_bias);
v128_t vnKLMN = wasm_f32x4_add(wasm_f32x4_mul(vzKLMN, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
const v128_t vsKLMN = wasm_i32x4_shl(vnKLMN, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_hi), vzCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_hi), vzGHIJ);
v128_t vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vln2_hi), vzKLMN);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_lo), vtGHIJ);
vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vln2_lo), vtKLMN);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vc5), vc4);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vc5), vc4);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vc5), vc4);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vc5), vc4);
v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vc5), vc4);
v128_t vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vtKLMN, vc5), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc3);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vc3);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vtKLMN, vpKLMN), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc2);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vc2);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vtKLMN, vpKLMN), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc1);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc1);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vc1);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vc1);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vc1);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vtKLMN, vpKLMN), vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
const v128_t ve0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
const v128_t ve4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
const v128_t ve89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
const v128_t veCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vsCDEF);
const v128_t veGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vsGHIJ);
const v128_t veKLMN = wasm_f32x4_add(wasm_f32x4_mul(vtKLMN, vpKLMN), vsKLMN);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(veCDEF, vone);
const v128_t vdGHIJ = wasm_f32x4_add(veGHIJ, vone);
const v128_t vdKLMN = wasm_f32x4_add(veKLMN, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(veCDEF, vdCDEF);
v128_t vfGHIJ = wasm_f32x4_div(veGHIJ, vdGHIJ);
v128_t vfKLMN = wasm_f32x4_div(veKLMN, vdKLMN);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
vfKLMN = wasm_v128_andnot(vfKLMN, wasm_f32x4_gt(vzKLMN, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
const v128_t vcfGHIJ = wasm_f32x4_sub(vone, vfGHIJ);
const v128_t vcfKLMN = wasm_f32x4_sub(vone, vfKLMN);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = __builtin_wasm_relaxed_laneselect_i32x4(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = __builtin_wasm_relaxed_laneselect_i32x4(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
vfGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(vfGHIJ, vcfGHIJ, wasm_i32x4_shr(vxGHIJ, 31));
vfKLMN = __builtin_wasm_relaxed_laneselect_i32x4(vfKLMN, vcfKLMN, wasm_i32x4_shr(vxKLMN, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
wasm_v128_store(output + 20, vfKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 11,446 | 46.106996 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-rr2-p5-div-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_rr2_p5_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
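// --- Illustrative note (not part of the XNNPACK source above) ---------------
// A hypothetical caller for the x4 micro-kernel above. The batch argument is a
// byte count and must be a multiple of sizeof(float); the kernel handles a
// 1-3 element tail itself, so no scalar remainder loop is needed here. The
// params union is assumed to be filled elsewhere by the matching XNNPACK
// init routine for this variant.
#include <stddef.h>
static void vsigmoid_buffer(const float* input, float* output, size_t count,
                            const union xnn_f32_sigmoid_params* params) {
  xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_rr2_p5_div_x4(
      count * sizeof(float), input, output, params);
}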
| 4,007 | 36.457944 | 94 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmrelaxedsimd-rr2-p5-div-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__wasmrelaxedsimd_rr2_p5_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vc5), vc4);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vc5), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vc1);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
const v128_t ve0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
const v128_t ve4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
vf0123 = __builtin_wasm_relaxed_laneselect_i32x4(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = __builtin_wasm_relaxed_laneselect_i32x4(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vc5), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vc1);
vt = wasm_f32x4_mul(vt, vs);
const v128_t ve = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
const v128_t vd = wasm_f32x4_add(ve, vone);
v128_t vf = wasm_f32x4_div(ve, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = __builtin_wasm_relaxed_laneselect_i32x4(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 6,559 | 38.281437 | 98 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmsimd-rr2-lut64-p2-div-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_lut64_p2_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
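  // Evaluation outline, on z = |x|:
  //   n := -z * log2(e), rounded to a multiple of 1/64 by the magic-bias trick;
  //   s := 2**n, formed by integer-adding exponent bits (vn << 17) to a
  //        2**(-k/64) table entry selected by the low 6 mantissa bits of vn;
  //   t := z + n * ln(2), with ln(2) split into hi/lo words for accuracy;
  //   exp(-z) ~= s * (1 - t + c2*t*t), f := exp(-z) / (1 + exp(-z)) = sigmoid(-z).
  // Finally f is flushed to 0 when z > denorm_cutoff and reflected to 1 - f for
  // non-negative inputs.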
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 17);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx8));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxB), vl89AB, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);
v128_t vp89AB = wasm_f32x4_mul(vt89AB, vc2);
vp0123 = wasm_f32x4_sub(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_sub(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_sub(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
const v128_t vy0123 = wasm_f32x4_sub(vs0123, wasm_f32x4_mul(vs0123, vp0123));
const v128_t vy4567 = wasm_f32x4_sub(vs4567, wasm_f32x4_mul(vs4567, vp4567));
const v128_t vy89AB = wasm_f32x4_sub(vs89AB, wasm_f32x4_mul(vs89AB, vp89AB));
const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);
const v128_t vd89AB = wasm_f32x4_add(vy89AB, vone);
v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(vy89AB, vd89AB);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
vf0123 = wasm_v128_bitselect(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = wasm_v128_bitselect(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = wasm_v128_bitselect(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
output += 12;
}
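  // Batch sizes that are not a multiple of 12 fall through to a 4-wide loop
  // and then to a partial-store tail of 1-3 elements.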
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = wasm_v128_bitselect(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = wasm_v128_bitselect(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 11,059 | 46.672414 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmsimd-rr2-lut64-p2-div-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_lut64_p2_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 17);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t veCDEF = wasm_i32x4_shl(vnCDEF, 17);
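    // WAsm SIMD has no gather, so each table lookup is assembled lane by lane:
    // load32_zero fills lane 0 and load32_lane patches lanes 1-3, using the
    // byte offsets (index * 4) held in the vidx vectors.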
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxC));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxF), vlCDEF, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
const v128_t vsCDEF = wasm_i32x4_add(vlCDEF, veCDEF);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_hi), vzCDEF);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_lo), vtCDEF);
v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);
v128_t vp89AB = wasm_f32x4_mul(vt89AB, vc2);
v128_t vpCDEF = wasm_f32x4_mul(vtCDEF, vc2);
vp0123 = wasm_f32x4_sub(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_sub(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_sub(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_sub(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
const v128_t vy0123 = wasm_f32x4_sub(vs0123, wasm_f32x4_mul(vs0123, vp0123));
const v128_t vy4567 = wasm_f32x4_sub(vs4567, wasm_f32x4_mul(vs4567, vp4567));
const v128_t vy89AB = wasm_f32x4_sub(vs89AB, wasm_f32x4_mul(vs89AB, vp89AB));
const v128_t vyCDEF = wasm_f32x4_sub(vsCDEF, wasm_f32x4_mul(vsCDEF, vpCDEF));
const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);
const v128_t vd89AB = wasm_f32x4_add(vy89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(vyCDEF, vone);
v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(vy89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(vyCDEF, vdCDEF);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
vf0123 = wasm_v128_bitselect(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = wasm_v128_bitselect(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = wasm_v128_bitselect(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = wasm_v128_bitselect(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = wasm_v128_bitselect(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = wasm_v128_bitselect(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 12,962 | 49.244186 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmsimd-rr2-lut64-p2-div-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_lut64_p2_div_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
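  // Each scalar parameter is kept as a duplicated pair in the params struct,
  // so a 64-bit splat load broadcasts it into all four lanes.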
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
const v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vminus_log2e), vmagic_bias);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 17);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t veCDEF = wasm_i32x4_shl(vnCDEF, 17);
const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
const v128_t veGHIJ = wasm_i32x4_shl(vnGHIJ, 17);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxC));
const uint32_t vidxG = wasm_u32x4_extract_lane(vidxGHIJ, 0);
v128_t vlGHIJ = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxG));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidxH = wasm_u32x4_extract_lane(vidxGHIJ, 1);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxH), vlGHIJ, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidxI = wasm_u32x4_extract_lane(vidxGHIJ, 2);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxI), vlGHIJ, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxF), vlCDEF, 3);
const uint32_t vidxJ = wasm_u32x4_extract_lane(vidxGHIJ, 3);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxJ), vlGHIJ, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
const v128_t vsCDEF = wasm_i32x4_add(vlCDEF, veCDEF);
const v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, veGHIJ);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
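    // Cody-Waite style reduction: t = z + n * ln(2) uses a hi/lo split of ln(2)
    // so the leading product incurs (almost) no rounding error and the lo term
    // adds back the remaining bits.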
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_hi), vzCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_hi), vzGHIJ);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_lo), vtGHIJ);
v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);
v128_t vp89AB = wasm_f32x4_mul(vt89AB, vc2);
v128_t vpCDEF = wasm_f32x4_mul(vtCDEF, vc2);
v128_t vpGHIJ = wasm_f32x4_mul(vtGHIJ, vc2);
vp0123 = wasm_f32x4_sub(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_sub(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_sub(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_sub(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
vpGHIJ = wasm_f32x4_sub(vtGHIJ, wasm_f32x4_mul(vpGHIJ, vtGHIJ));
const v128_t vy0123 = wasm_f32x4_sub(vs0123, wasm_f32x4_mul(vs0123, vp0123));
const v128_t vy4567 = wasm_f32x4_sub(vs4567, wasm_f32x4_mul(vs4567, vp4567));
const v128_t vy89AB = wasm_f32x4_sub(vs89AB, wasm_f32x4_mul(vs89AB, vp89AB));
const v128_t vyCDEF = wasm_f32x4_sub(vsCDEF, wasm_f32x4_mul(vsCDEF, vpCDEF));
const v128_t vyGHIJ = wasm_f32x4_sub(vsGHIJ, wasm_f32x4_mul(vsGHIJ, vpGHIJ));
const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);
const v128_t vd89AB = wasm_f32x4_add(vy89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(vyCDEF, vone);
const v128_t vdGHIJ = wasm_f32x4_add(vyGHIJ, vone);
v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(vy89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(vyCDEF, vdCDEF);
v128_t vfGHIJ = wasm_f32x4_div(vyGHIJ, vdGHIJ);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
const v128_t vcfGHIJ = wasm_f32x4_sub(vone, vfGHIJ);
vf0123 = wasm_v128_bitselect(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = wasm_v128_bitselect(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = wasm_v128_bitselect(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = wasm_v128_bitselect(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
vfGHIJ = wasm_v128_bitselect(vfGHIJ, vcfGHIJ, wasm_i32x4_shr(vxGHIJ, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = wasm_v128_bitselect(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = wasm_v128_bitselect(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 14,865 | 51.34507 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-wasmsimd-rr2-lut64-p2-div-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_lut64_p2_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
const v128_t vx89AB = wasm_v128_load(input + 8);
const v128_t vxCDEF = wasm_v128_load(input + 12);
const v128_t vxGHIJ = wasm_v128_load(input + 16);
const v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
const v128_t vz0123 = wasm_f32x4_abs(vx0123);
const v128_t vz4567 = wasm_f32x4_abs(vx4567);
const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
const v128_t vzKLMN = wasm_f32x4_abs(vxKLMN);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vminus_log2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vminus_log2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vminus_log2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vminus_log2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vminus_log2e), vmagic_bias);
v128_t vnKLMN = wasm_f32x4_add(wasm_f32x4_mul(vzKLMN, vminus_log2e), vmagic_bias);
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ve89AB = wasm_i32x4_shl(vn89AB, 17);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t veCDEF = wasm_i32x4_shl(vnCDEF, 17);
const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
const v128_t veGHIJ = wasm_i32x4_shl(vnGHIJ, 17);
const v128_t vidxKLMN = wasm_i32x4_shl(wasm_v128_and(vnKLMN, vindex_mask), 2);
const v128_t veKLMN = wasm_i32x4_shl(vnKLMN, 17);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxC));
const uint32_t vidxG = wasm_u32x4_extract_lane(vidxGHIJ, 0);
v128_t vlGHIJ = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxG));
const uint32_t vidxK = wasm_u32x4_extract_lane(vidxKLMN, 0);
v128_t vlKLMN = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxK));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidxH = wasm_u32x4_extract_lane(vidxGHIJ, 1);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxH), vlGHIJ, 1);
const uint32_t vidxL = wasm_u32x4_extract_lane(vidxKLMN, 1);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxL), vlKLMN, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidxI = wasm_u32x4_extract_lane(vidxGHIJ, 2);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxI), vlGHIJ, 2);
const uint32_t vidxM = wasm_u32x4_extract_lane(vidxKLMN, 2);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxM), vlKLMN, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxF), vlCDEF, 3);
const uint32_t vidxJ = wasm_u32x4_extract_lane(vidxGHIJ, 3);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxJ), vlGHIJ, 3);
const uint32_t vidxN = wasm_u32x4_extract_lane(vidxKLMN, 3);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidxN), vlKLMN, 3);
const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);
const v128_t vs89AB = wasm_i32x4_add(vl89AB, ve89AB);
const v128_t vsCDEF = wasm_i32x4_add(vlCDEF, veCDEF);
const v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, veGHIJ);
const v128_t vsKLMN = wasm_i32x4_add(vlKLMN, veKLMN);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_hi), vzCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_hi), vzGHIJ);
v128_t vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vln2_hi), vzKLMN);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vln2_lo), vtGHIJ);
vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vln2_lo), vtKLMN);
v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);
v128_t vp89AB = wasm_f32x4_mul(vt89AB, vc2);
v128_t vpCDEF = wasm_f32x4_mul(vtCDEF, vc2);
v128_t vpGHIJ = wasm_f32x4_mul(vtGHIJ, vc2);
v128_t vpKLMN = wasm_f32x4_mul(vtKLMN, vc2);
vp0123 = wasm_f32x4_sub(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_sub(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_sub(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_sub(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
vpGHIJ = wasm_f32x4_sub(vtGHIJ, wasm_f32x4_mul(vpGHIJ, vtGHIJ));
vpKLMN = wasm_f32x4_sub(vtKLMN, wasm_f32x4_mul(vpKLMN, vtKLMN));
const v128_t vy0123 = wasm_f32x4_sub(vs0123, wasm_f32x4_mul(vs0123, vp0123));
const v128_t vy4567 = wasm_f32x4_sub(vs4567, wasm_f32x4_mul(vs4567, vp4567));
const v128_t vy89AB = wasm_f32x4_sub(vs89AB, wasm_f32x4_mul(vs89AB, vp89AB));
const v128_t vyCDEF = wasm_f32x4_sub(vsCDEF, wasm_f32x4_mul(vsCDEF, vpCDEF));
const v128_t vyGHIJ = wasm_f32x4_sub(vsGHIJ, wasm_f32x4_mul(vsGHIJ, vpGHIJ));
const v128_t vyKLMN = wasm_f32x4_sub(vsKLMN, wasm_f32x4_mul(vsKLMN, vpKLMN));
const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);
const v128_t vd89AB = wasm_f32x4_add(vy89AB, vone);
const v128_t vdCDEF = wasm_f32x4_add(vyCDEF, vone);
const v128_t vdGHIJ = wasm_f32x4_add(vyGHIJ, vone);
const v128_t vdKLMN = wasm_f32x4_add(vyKLMN, vone);
v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);
v128_t vf89AB = wasm_f32x4_div(vy89AB, vd89AB);
v128_t vfCDEF = wasm_f32x4_div(vyCDEF, vdCDEF);
v128_t vfGHIJ = wasm_f32x4_div(vyGHIJ, vdGHIJ);
v128_t vfKLMN = wasm_f32x4_div(vyKLMN, vdKLMN);
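    // Post-processing: flush the result to 0 once z exceeds denorm_cutoff
    // (where sigmoid(-z) would underflow), then use the sign bit of x to pick
    // either f or the reflection 1 - f, since sigmoid(x) = 1 - sigmoid(-x).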
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
vfKLMN = wasm_v128_andnot(vfKLMN, wasm_f32x4_gt(vzKLMN, vdenorm_cutoff));
const v128_t vcf0123 = wasm_f32x4_sub(vone, vf0123);
const v128_t vcf4567 = wasm_f32x4_sub(vone, vf4567);
const v128_t vcf89AB = wasm_f32x4_sub(vone, vf89AB);
const v128_t vcfCDEF = wasm_f32x4_sub(vone, vfCDEF);
const v128_t vcfGHIJ = wasm_f32x4_sub(vone, vfGHIJ);
const v128_t vcfKLMN = wasm_f32x4_sub(vone, vfKLMN);
vf0123 = wasm_v128_bitselect(vf0123, vcf0123, wasm_i32x4_shr(vx0123, 31));
vf4567 = wasm_v128_bitselect(vf4567, vcf4567, wasm_i32x4_shr(vx4567, 31));
vf89AB = wasm_v128_bitselect(vf89AB, vcf89AB, wasm_i32x4_shr(vx89AB, 31));
vfCDEF = wasm_v128_bitselect(vfCDEF, vcfCDEF, wasm_i32x4_shr(vxCDEF, 31));
vfGHIJ = wasm_v128_bitselect(vfGHIJ, vcfGHIJ, wasm_i32x4_shr(vxGHIJ, 31));
vfKLMN = wasm_v128_bitselect(vfKLMN, vcfKLMN, wasm_i32x4_shr(vxKLMN, 31));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
wasm_v128_store(output + 20, vfKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = wasm_v128_bitselect(vf, vcf, wasm_i32x4_shr(vx, 31));
wasm_v128_store(output, vf);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_abs(vx);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vminus_log2e), vmagic_bias);
const v128_t ve = wasm_i32x4_shl(vn, 17);
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx3), vl, 3);
const v128_t vs = wasm_i32x4_add(vl, ve);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vln2_lo), vt);
v128_t vp = wasm_f32x4_mul(vt, vc2);
vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));
const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
const v128_t vd = wasm_f32x4_add(vy, vone);
v128_t vf = wasm_f32x4_div(vy, vd);
vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
const v128_t vcf = wasm_f32x4_sub(vone, vf);
vf = wasm_v128_bitselect(vf, vcf, wasm_i32x4_shr(vx, 31));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
}
}
}
| 16,768 | 53.093548 | 124 |
c