repo
stringlengths
1
152
file
stringlengths
14
221
code
stringlengths
501
25k
file_length
int64
501
25k
avg_line_length
float64
20
99.5
max_line_length
int64
21
134
extension_type
stringclasses
2 values
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-neon-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-neon.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__neon_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float32x4_t vb = vld1q_dup_f32(input_b); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float32x4_t va = vld1q_f32(input_a); input_a += 4; float32x4_t vacc = vmaxq_f32(va, vb); vst1q_f32(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const float32x4_t va = vld1q_f32(input_a); float32x4_t vacc = vmaxq_f32(va, vb); float32x2_t vacc_lo = vget_low_f32(vacc); if (batch & (2 * sizeof(float))) { vst1_f32(output, vacc_lo); output += 2; vacc_lo = vget_high_f32(vacc); } if (batch & (1 * sizeof(float))) { vst1_lane_f32(output, vacc_lo, 0); } } }
1,400
24.472727
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-neon-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-neon.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__neon_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float32x4_t vb = vld1q_dup_f32(input_b); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { float32x4_t vacc0 = vld1q_f32(input_a); input_a += 4; float32x4_t vacc1 = vld1q_f32(input_a); input_a += 4; vacc0 = vmaxq_f32(vacc0, vb); vacc1 = vmaxq_f32(vacc1, vb); vst1q_f32(output, vacc0); output += 4; vst1q_f32(output, vacc1); output += 4; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float32x4_t va = vld1q_f32(input_a); input_a += 4; float32x4_t vacc = vmaxq_f32(va, vb); vst1q_f32(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const float32x4_t va = vld1q_f32(input_a); float32x4_t vacc = vmaxq_f32(va, vb); float32x2_t vacc_lo = vget_low_f32(vacc); if (batch & (2 * sizeof(float))) { vst1_f32(output, vacc_lo); output += 2; vacc_lo = vget_high_f32(vacc); } if (batch & (1 * sizeof(float))) { vst1_lane_f32(output, vacc_lo, 0); } } }
1,745
25.059701
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-scalar-x1.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__scalar_x1( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= sizeof(float); batch -= sizeof(float)) { const float va = *input_a++; float vacc = math_max_f32(va, vb); *output++ = vacc; } }
924
23.342105
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-scalar-x2.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__scalar_x2( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; input_a += 2; float vacc0 = math_max_f32(va0, vb); float vacc1 = math_max_f32(va1, vb); output[0] = vacc0; output[1] = vacc1; output += 2; } if XNN_UNLIKELY(batch != 0) { assert(batch == sizeof(float)); const float va = *input_a; float vacc = math_max_f32(va, vb); *output = vacc; } }
1,235
22.320755
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-scalar-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__scalar_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; input_a += 4; float vacc0 = math_max_f32(va0, vb); float vacc1 = math_max_f32(va1, vb); float vacc2 = math_max_f32(va2, vb); float vacc3 = math_max_f32(va3, vb); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output += 4; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; float vacc = math_max_f32(va, vb); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
1,470
23.114754
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-scalar-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__scalar_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; const float va4 = input_a[4]; const float va5 = input_a[5]; const float va6 = input_a[6]; const float va7 = input_a[7]; input_a += 8; float vacc0 = math_max_f32(va0, vb); float vacc1 = math_max_f32(va1, vb); float vacc2 = math_max_f32(va2, vb); float vacc3 = math_max_f32(va3, vb); float vacc4 = math_max_f32(va4, vb); float vacc5 = math_max_f32(va5, vb); float vacc6 = math_max_f32(va6, vb); float vacc7 = math_max_f32(va7, vb); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output[4] = vacc4; output[5] = vacc5; output[6] = vacc6; output[7] = vacc7; output += 8; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; float vacc = math_max_f32(va, vb); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
1,862
24.520548
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-sse-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-sse.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xmmintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__sse_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const __m128 vb = _mm_load1_ps(input_b); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const __m128 va = _mm_loadu_ps(input_a); input_a += 4; __m128 vacc = _mm_max_ps(va, vb); _mm_storeu_ps(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const __m128 va = _mm_loadu_ps(input_a); __m128 vacc = _mm_max_ps(va, vb); if (batch & (2 * sizeof(float))) { _mm_storel_pi((__m64*) output, vacc); vacc = _mm_movehl_ps(vacc, vacc); output += 2; } if (batch & (1 * sizeof(float))) { _mm_store_ss(output, vacc); } } }
1,399
23.561404
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-sse-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-sse.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xmmintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__sse_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const __m128 vb = _mm_load1_ps(input_b); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const __m128 va0 = _mm_loadu_ps(input_a); const __m128 va1 = _mm_loadu_ps(input_a + 4); input_a += 8; __m128 vacc0 = _mm_max_ps(va0, vb); __m128 vacc1 = _mm_max_ps(va1, vb); _mm_storeu_ps(output, vacc0); _mm_storeu_ps(output + 4, vacc1); output += 8; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const __m128 va = _mm_loadu_ps(input_a); input_a += 4; __m128 vacc = _mm_max_ps(va, vb); _mm_storeu_ps(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const __m128 va = _mm_loadu_ps(input_a); __m128 vacc = _mm_max_ps(va, vb); if (batch & (2 * sizeof(float))) { _mm_storel_pi((__m64*) output, vacc); vacc = _mm_movehl_ps(vacc, vacc); output += 2; } if (batch & (1 * sizeof(float))) { _mm_store_ss(output, vacc); } } }
1,757
23.760563
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-wasm-x1.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__wasm_x1( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= sizeof(float); batch -= sizeof(float)) { const float va = *input_a++; float vacc = __builtin_wasm_max_f32(va, vb); *output++ = vacc; } }
932
23.552632
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-wasm-x2.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__wasm_x2( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; input_a += 2; float vacc0 = __builtin_wasm_max_f32(va0, vb); float vacc1 = __builtin_wasm_max_f32(va1, vb); output[0] = vacc0; output[1] = vacc1; output += 2; } if XNN_UNLIKELY(batch != 0) { assert(batch == sizeof(float)); const float va = *input_a; float vacc = __builtin_wasm_max_f32(va, vb); *output = vacc; } }
1,263
22.849057
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-wasm-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__wasm_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; input_a += 4; float vacc0 = __builtin_wasm_max_f32(va0, vb); float vacc1 = __builtin_wasm_max_f32(va1, vb); float vacc2 = __builtin_wasm_max_f32(va2, vb); float vacc3 = __builtin_wasm_max_f32(va3, vb); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output += 4; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; float vacc = __builtin_wasm_max_f32(va, vb); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
1,518
23.901639
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-wasm-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__wasm_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; const float va4 = input_a[4]; const float va5 = input_a[5]; const float va6 = input_a[6]; const float va7 = input_a[7]; input_a += 8; float vacc0 = __builtin_wasm_max_f32(va0, vb); float vacc1 = __builtin_wasm_max_f32(va1, vb); float vacc2 = __builtin_wasm_max_f32(va2, vb); float vacc3 = __builtin_wasm_max_f32(va3, vb); float vacc4 = __builtin_wasm_max_f32(va4, vb); float vacc5 = __builtin_wasm_max_f32(va5, vb); float vacc6 = __builtin_wasm_max_f32(va6, vb); float vacc7 = __builtin_wasm_max_f32(va7, vb); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output[4] = vacc4; output[5] = vacc5; output[6] = vacc6; output[7] = vacc7; output += 8; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; float vacc = __builtin_wasm_max_f32(va, vb); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
1,950
25.726027
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-wasmsimd-arm-x16.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__wasmsimd_arm_x16( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vb = wasm_v128_load32_splat(input_b); for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) { const v128_t va0 = wasm_v128_load(input_a); const v128_t va1 = wasm_v128_load(input_a + 4); const v128_t va2 = wasm_v128_load(input_a + 8); const v128_t va3 = wasm_v128_load(input_a + 12); input_a += 16; v128_t vy0 = wasm_f32x4_max(va0, vb); v128_t vy1 = wasm_f32x4_max(va1, vb); v128_t vy2 = wasm_f32x4_max(va2, vb); v128_t vy3 = wasm_f32x4_max(va3, vb); wasm_v128_store(output, vy0); wasm_v128_store(output + 4, vy1); wasm_v128_store(output + 8, vy2); wasm_v128_store(output + 12, vy3); output += 16; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; v128_t vy = wasm_f32x4_max(va, vb); wasm_v128_store(output, vy); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); v128_t vy = wasm_f32x4_max(va, vb); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vy, 0); vy = wasm_v64x2_shuffle(vy, vy, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vy, 0); } } }
2,050
24.962025
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-wasmsimd-arm-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__wasmsimd_arm_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vb = wasm_v128_load32_splat(input_b); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; v128_t vy = wasm_f32x4_max(va, vb); wasm_v128_store(output, vy); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); v128_t vy = wasm_f32x4_max(va, vb); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vy, 0); vy = wasm_v64x2_shuffle(vy, vy, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vy, 0); } } }
1,413
22.966102
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-wasmsimd-arm-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__wasmsimd_arm_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vb = wasm_v128_load32_splat(input_b); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const v128_t va0 = wasm_v128_load(input_a); const v128_t va1 = wasm_v128_load(input_a + 4); input_a += 8; v128_t vy0 = wasm_f32x4_max(va0, vb); v128_t vy1 = wasm_f32x4_max(va1, vb); wasm_v128_store(output, vy0); wasm_v128_store(output + 4, vy1); output += 8; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; v128_t vy = wasm_f32x4_max(va, vb); wasm_v128_store(output, vy); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); v128_t vy = wasm_f32x4_max(va, vb); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vy, 0); vy = wasm_v64x2_shuffle(vy, vy, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vy, 0); } } }
1,779
23.383562
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-wasmsimd-x86-x16.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__wasmsimd_x86_x16( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vb = wasm_v128_load32_splat(input_b); for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) { const v128_t va0 = wasm_v128_load(input_a); const v128_t va1 = wasm_v128_load(input_a + 4); const v128_t va2 = wasm_v128_load(input_a + 8); const v128_t va3 = wasm_v128_load(input_a + 12); input_a += 16; v128_t vy0 = wasm_f32x4_pmax(vb, va0); v128_t vy1 = wasm_f32x4_pmax(vb, va1); v128_t vy2 = wasm_f32x4_pmax(vb, va2); v128_t vy3 = wasm_f32x4_pmax(vb, va3); wasm_v128_store(output, vy0); wasm_v128_store(output + 4, vy1); wasm_v128_store(output + 8, vy2); wasm_v128_store(output + 12, vy3); output += 16; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; v128_t vy = wasm_f32x4_pmax(vb, va); wasm_v128_store(output, vy); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); v128_t vy = wasm_f32x4_pmax(vb, va); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vy, 0); vy = wasm_v64x2_shuffle(vy, vy, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vy, 0); } } }
2,056
25.037975
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-wasmsimd-x86-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__wasmsimd_x86_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vb = wasm_v128_load32_splat(input_b); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; v128_t vy = wasm_f32x4_pmax(vb, va); wasm_v128_store(output, vy); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); v128_t vy = wasm_f32x4_pmax(vb, va); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vy, 0); vy = wasm_v64x2_shuffle(vy, vy, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vy, 0); } } }
1,415
23
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-wasmsimd-x86-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmaxc_ukernel__wasmsimd_x86_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vb = wasm_v128_load32_splat(input_b); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const v128_t va0 = wasm_v128_load(input_a); const v128_t va1 = wasm_v128_load(input_a + 4); input_a += 8; v128_t vy0 = wasm_f32x4_pmax(vb, va0); v128_t vy1 = wasm_f32x4_pmax(vb, va1); wasm_v128_store(output, vy0); wasm_v128_store(output + 4, vy1); output += 8; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; v128_t vy = wasm_f32x4_pmax(vb, va); wasm_v128_store(output, vy); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); v128_t vy = wasm_f32x4_pmax(vb, va); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vy, 0); vy = wasm_v64x2_shuffle(vy, vy, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vy, 0); } } }
1,783
23.438356
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-avx-x16.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-avx.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__avx_x16( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) { __m256 vacc0 = _mm256_loadu_ps(input_a); __m256 vacc1 = _mm256_loadu_ps(input_a + 8); input_a += 16; vacc0 = _mm256_min_ps(vacc0, _mm256_loadu_ps(input_b)); vacc1 = _mm256_min_ps(vacc1, _mm256_loadu_ps(input_b + 8)); input_b += 16; _mm256_storeu_ps(output, vacc0); _mm256_storeu_ps(output + 8, vacc1); output += 16; } for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { __m256 vacc = _mm256_loadu_ps(input_a); input_a += 8; vacc = _mm256_min_ps(vacc, _mm256_loadu_ps(input_b)); input_b += 8; _mm256_storeu_ps(output, vacc); output += 8; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(float)); assert(batch <= 7 * sizeof(float)); const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch)); __m256 vacc = _mm256_maskload_ps(input_a, vmask); const __m256 vb = _mm256_maskload_ps(input_b, vmask); vacc = _mm256_min_ps(vacc, vb); __m128 vacc_lo = _mm256_castps256_ps128(vacc); if (batch & (4 * sizeof(float))) { _mm_storeu_ps(output, vacc_lo); vacc_lo = _mm256_extractf128_ps(vacc, 1); output += 4; } if (batch & (2 * sizeof(float))) { _mm_storel_pi((__m64*) output, vacc_lo); vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo); output += 2; } if (batch & (1 * sizeof(float))) { _mm_store_ss(output, vacc_lo); } } }
2,244
26.378049
112
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-avx-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-avx.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__avx_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { __m256 vacc = _mm256_loadu_ps(input_a); input_a += 8; vacc = _mm256_min_ps(vacc, _mm256_loadu_ps(input_b)); input_b += 8; _mm256_storeu_ps(output, vacc); output += 8; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(float)); assert(batch <= 7 * sizeof(float)); const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch)); __m256 vacc = _mm256_maskload_ps(input_a, vmask); const __m256 vb = _mm256_maskload_ps(input_b, vmask); vacc = _mm256_min_ps(vacc, vb); __m128 vacc_lo = _mm256_castps256_ps128(vacc); if (batch & (4 * sizeof(float))) { _mm_storeu_ps(output, vacc_lo); vacc_lo = _mm256_extractf128_ps(vacc, 1); output += 4; } if (batch & (2 * sizeof(float))) { _mm_storel_pi((__m64*) output, vacc_lo); vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo); output += 2; } if (batch & (1 * sizeof(float))) { _mm_store_ss(output, vacc_lo); } } }
1,814
26.089552
112
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-avx512f-x16.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-avx512f.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__avx512f_x16( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) { __m512 vacc = _mm512_loadu_ps(input_a); input_a += 16; vacc = _mm512_min_ps(vacc, _mm512_loadu_ps(input_b)); input_b += 16; _mm512_storeu_ps(output, vacc); output += 16; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(float)); assert(batch <= 15 * sizeof(float)); // Prepare mask for valid 32-bit elements (depends on batch). batch >>= XNN_LOG2_SIZEOF_FLOAT; const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1))); __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a); vacc = _mm512_maskz_min_ps(vmask, vacc, _mm512_maskz_loadu_ps(vmask, input_b)); _mm512_mask_storeu_ps(output, vmask, vacc); } }
1,564
26.946429
105
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-avx512f-x32.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-avx512f.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__avx512f_x32( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) { __m512 vacc0 = _mm512_loadu_ps(input_a); __m512 vacc1 = _mm512_loadu_ps(input_a + 16); input_a += 32; vacc0 = _mm512_min_ps(vacc0, _mm512_loadu_ps(input_b)); vacc1 = _mm512_min_ps(vacc1, _mm512_loadu_ps(input_b + 16)); input_b += 32; _mm512_storeu_ps(output, vacc0); _mm512_storeu_ps(output + 16, vacc1); output += 32; } for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) { __m512 vacc = _mm512_loadu_ps(input_a); input_a += 16; vacc = _mm512_min_ps(vacc, _mm512_loadu_ps(input_b)); input_b += 16; _mm512_storeu_ps(output, vacc); output += 16; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(float)); assert(batch <= 15 * sizeof(float)); // Prepare mask for valid 32-bit elements (depends on batch). batch >>= XNN_LOG2_SIZEOF_FLOAT; const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1))); __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a); vacc = _mm512_maskz_min_ps(vmask, vacc, _mm512_maskz_loadu_ps(vmask, input_b)); _mm512_mask_storeu_ps(output, vmask, vacc); } }
1,996
27.126761
105
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-neon-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-neon.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__neon_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float32x4_t va = vld1q_f32(input_a); input_a += 4; const float32x4_t vb = vld1q_f32(input_b); input_b += 4; float32x4_t vacc = vminq_f32(va, vb); vst1q_f32(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const float32x4_t va = vld1q_f32(input_a); const float32x4_t vb = vld1q_f32(input_b); float32x4_t vacc = vminq_f32(va, vb); float32x2_t vacc_lo = vget_low_f32(vacc); if (batch & (2 * sizeof(float))) { vst1_f32(output, vacc_lo); output += 2; vacc_lo = vget_high_f32(vacc); } if (batch & (1 * sizeof(float))) { vst1_lane_f32(output, vacc_lo, 0); } } }
1,456
25.017857
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-neon-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-neon.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__neon_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const float32x4_t va0 = vld1q_f32(input_a); input_a += 4; const float32x4_t vb0 = vld1q_f32(input_b); input_b += 4; const float32x4_t va1 = vld1q_f32(input_a); input_a += 4; const float32x4_t vb1 = vld1q_f32(input_b); input_b += 4; float32x4_t vacc0 = vminq_f32(va0, vb0); float32x4_t vacc1 = vminq_f32(va1, vb1); vst1q_f32(output, vacc0); output += 4; vst1q_f32(output, vacc1); output += 4; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float32x4_t va = vld1q_f32(input_a); input_a += 4; const float32x4_t vb = vld1q_f32(input_b); input_b += 4; float32x4_t vacc = vminq_f32(va, vb); vst1q_f32(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const float32x4_t va = vld1q_f32(input_a); const float32x4_t vb = vld1q_f32(input_b); float32x4_t vacc = vminq_f32(va, vb); float32x2_t vacc_lo = vget_low_f32(vacc); if (batch & (2 * sizeof(float))) { vst1_f32(output, vacc_lo); output += 2; vacc_lo = vget_high_f32(vacc); } if (batch & (1 * sizeof(float))) { vst1_lane_f32(output, vacc_lo, 0); } } }
1,955
26.942857
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-scalar-x1.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__scalar_x1( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= sizeof(float); batch -= sizeof(float)) { const float va = *input_a++; const float vb = *input_b++; float vacc = math_min_f32(va, vb); *output++ = vacc; } }
926
23.394737
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-scalar-x2.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__scalar_x2( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; input_a += 2; const float vb0 = input_b[0]; const float vb1 = input_b[1]; input_b += 2; float vacc0 = math_min_f32(va0, vb0); float vacc1 = math_min_f32(va1, vb1); output[0] = vacc0; output[1] = vacc1; output += 2; } if XNN_UNLIKELY(batch != 0) { assert(batch == sizeof(float)); const float va = *input_a; const float vb = *input_b; float vacc = math_min_f32(va, vb); *output = vacc; } }
1,324
22.245614
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-scalar-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__scalar_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; input_a += 4; const float vb0 = input_b[0]; const float vb1 = input_b[1]; const float vb2 = input_b[2]; const float vb3 = input_b[3]; input_b += 4; float vacc0 = math_min_f32(va0, vb0); float vacc1 = math_min_f32(va1, vb1); float vacc2 = math_min_f32(va2, vb2); float vacc3 = math_min_f32(va3, vb3); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output += 4; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; const float vb = *input_b++; float vacc = math_min_f32(va, vb); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
1,633
23.38806
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-scalar-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__scalar_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; const float va4 = input_a[4]; const float va5 = input_a[5]; const float va6 = input_a[6]; const float va7 = input_a[7]; input_a += 8; const float vb0 = input_b[0]; const float vb1 = input_b[1]; const float vb2 = input_b[2]; const float vb3 = input_b[3]; const float vb4 = input_b[4]; const float vb5 = input_b[5]; const float vb6 = input_b[6]; const float vb7 = input_b[7]; input_b += 8; float vacc0 = math_min_f32(va0, vb0); float vacc1 = math_min_f32(va1, vb1); float vacc2 = math_min_f32(va2, vb2); float vacc3 = math_min_f32(va3, vb3); float vacc4 = math_min_f32(va4, vb4); float vacc5 = math_min_f32(va5, vb5); float vacc6 = math_min_f32(va6, vb6); float vacc7 = math_min_f32(va7, vb7); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output[4] = vacc4; output[5] = vacc5; output[6] = vacc6; output[7] = vacc7; output += 8; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; const float vb = *input_b++; float vacc = math_min_f32(va, vb); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
2,165
25.096386
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-sse-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-sse.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xmmintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__sse_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const __m128 va = _mm_loadu_ps(input_a); input_a += 4; const __m128 vb = _mm_loadu_ps(input_b); input_b += 4; __m128 vacc = _mm_min_ps(va, vb); _mm_storeu_ps(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const __m128 va = _mm_loadu_ps(input_a); const __m128 vb = _mm_loadu_ps(input_b); __m128 vacc = _mm_min_ps(va, vb); if (batch & (2 * sizeof(float))) { _mm_storel_pi((__m64*) output, vacc); vacc = _mm_movehl_ps(vacc, vacc); output += 2; } if (batch & (1 * sizeof(float))) { _mm_store_ss(output, vacc); } } }
1,464
23.016393
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-sse-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-sse.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xmmintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__sse_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const __m128 va0 = _mm_loadu_ps(input_a); const __m128 va1 = _mm_loadu_ps(input_a + 4); input_a += 8; const __m128 vb0 = _mm_loadu_ps(input_b); const __m128 vb1 = _mm_loadu_ps(input_b + 4); input_b += 8; __m128 vacc0 = _mm_min_ps(va0, vb0); __m128 vacc1 = _mm_min_ps(va1, vb1); _mm_storeu_ps(output, vacc0); _mm_storeu_ps(output + 4, vacc1); output += 8; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const __m128 va = _mm_loadu_ps(input_a); input_a += 4; const __m128 vb = _mm_loadu_ps(input_b); input_b += 4; __m128 vacc = _mm_min_ps(va, vb); _mm_storeu_ps(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const __m128 va = _mm_loadu_ps(input_a); const __m128 vb = _mm_loadu_ps(input_b); __m128 vacc = _mm_min_ps(va, vb); if (batch & (2 * sizeof(float))) { _mm_storel_pi((__m64*) output, vacc); vacc = _mm_movehl_ps(vacc, vacc); output += 2; } if (batch & (1 * sizeof(float))) { _mm_store_ss(output, vacc); } } }
1,939
23.556962
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-wasm-x1.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__wasm_x1( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= sizeof(float); batch -= sizeof(float)) { const float va = *input_a++; const float vb = *input_b++; float vacc = __builtin_wasm_min_f32(va, vb); *output++ = vacc; } }
934
23.605263
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-wasm-x2.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__wasm_x2( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; input_a += 2; const float vb0 = input_b[0]; const float vb1 = input_b[1]; input_b += 2; float vacc0 = __builtin_wasm_min_f32(va0, vb0); float vacc1 = __builtin_wasm_min_f32(va1, vb1); output[0] = vacc0; output[1] = vacc1; output += 2; } if XNN_UNLIKELY(batch != 0) { assert(batch == sizeof(float)); const float va = *input_a; const float vb = *input_b; float vacc = __builtin_wasm_min_f32(va, vb); *output = vacc; } }
1,352
22.736842
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-wasm-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__wasm_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; input_a += 4; const float vb0 = input_b[0]; const float vb1 = input_b[1]; const float vb2 = input_b[2]; const float vb3 = input_b[3]; input_b += 4; float vacc0 = __builtin_wasm_min_f32(va0, vb0); float vacc1 = __builtin_wasm_min_f32(va1, vb1); float vacc2 = __builtin_wasm_min_f32(va2, vb2); float vacc3 = __builtin_wasm_min_f32(va3, vb3); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output += 4; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; const float vb = *input_b++; float vacc = __builtin_wasm_min_f32(va, vb); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
1,681
24.104478
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-wasm-x8.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>

// Elementwise f32 minimum for WebAssembly, unrolled 8x, using the
// __builtin_wasm_min_f32 intrinsic.  `batch` is the input size in BYTES.
void xnn_f32_vmin_ukernel__wasm_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    const float va4 = input_a[4];
    const float va5 = input_a[5];
    const float va6 = input_a[6];
    const float va7 = input_a[7];
    input_a += 8;

    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    const float vb2 = input_b[2];
    const float vb3 = input_b[3];
    const float vb4 = input_b[4];
    const float vb5 = input_b[5];
    const float vb6 = input_b[6];
    const float vb7 = input_b[7];
    input_b += 8;

    float vacc0 = __builtin_wasm_min_f32(va0, vb0);
    float vacc1 = __builtin_wasm_min_f32(va1, vb1);
    float vacc2 = __builtin_wasm_min_f32(va2, vb2);
    float vacc3 = __builtin_wasm_min_f32(va3, vb3);
    float vacc4 = __builtin_wasm_min_f32(va4, vb4);
    float vacc5 = __builtin_wasm_min_f32(va5, vb5);
    float vacc6 = __builtin_wasm_min_f32(va6, vb6);
    float vacc7 = __builtin_wasm_min_f32(va7, vb7);

    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output[4] = vacc4;
    output[5] = vacc5;
    output[6] = vacc6;
    output[7] = vacc7;
    output += 8;
  }
  // Tail: 1..7 remaining floats.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      const float vb = *input_b++;
      float vacc = __builtin_wasm_min_f32(va, vb);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
2,253
26.156627
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-wasmsimd-arm-x16.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>

// Elementwise f32 minimum, WAsm SIMD (arm-tuned variant using
// wasm_f32x4_min), 16 floats per main-loop iteration.  `batch` is the input
// size in BYTES.  Declared XNN_OOB_READS: the tail loads full 4-float vectors
// past the logical end but stores only valid lanes.
void xnn_f32_vmin_ukernel__wasmsimd_arm_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: 16 floats per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;

    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    const v128_t vb2 = wasm_v128_load(input_b + 8);
    const v128_t vb3 = wasm_v128_load(input_b + 12);
    input_b += 16;

    v128_t vacc0 = wasm_f32x4_min(va0, vb0);
    v128_t vacc1 = wasm_f32x4_min(va1, vb1);
    v128_t vacc2 = wasm_f32x4_min(va2, vb2);
    v128_t vacc3 = wasm_f32x4_min(va3, vb3);

    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
  }
  // Secondary loop: 4 floats per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;

    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;

    v128_t vacc = wasm_f32x4_min(va, vb);

    wasm_v128_store(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);

    v128_t vacc = wasm_f32x4_min(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
2,369
25.931818
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-wasmsimd-arm-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__wasmsimd_arm_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; const v128_t vb = wasm_v128_load(input_b); input_b += 4; v128_t vacc = wasm_f32x4_min(va, vb); wasm_v128_store(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); const v128_t vb = wasm_v128_load(input_b); v128_t vacc = wasm_f32x4_min(va, vb); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vacc, 0); vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vacc, 0); } } }
1,487
23
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-wasmsimd-arm-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__wasmsimd_arm_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const v128_t va0 = wasm_v128_load(input_a); const v128_t va1 = wasm_v128_load(input_a + 4); input_a += 8; const v128_t vb0 = wasm_v128_load(input_b); const v128_t vb1 = wasm_v128_load(input_b + 4); input_b += 8; v128_t vacc0 = wasm_f32x4_min(va0, vb0); v128_t vacc1 = wasm_f32x4_min(va1, vb1); wasm_v128_store(output, vacc0); wasm_v128_store(output + 4, vacc1); output += 8; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; const v128_t vb = wasm_v128_load(input_b); input_b += 4; v128_t vacc = wasm_f32x4_min(va, vb); wasm_v128_store(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); const v128_t vb = wasm_v128_load(input_b); v128_t vacc = wasm_f32x4_min(va, vb); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vacc, 0); vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vacc, 0); } } }
1,982
23.7875
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-wasmsimd-x86-x16.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>

// Elementwise f32 minimum, WAsm SIMD x86-tuned variant, 16 floats per
// main-loop iteration.  Uses wasm_f32x4_pmin instead of wasm_f32x4_min
// (NOTE(review): pmin has different NaN/signed-zero semantics than min; the
// arm/x86 template split appears deliberate — confirm against the template).
// `batch` is the input size in BYTES.  Declared XNN_OOB_READS.
void xnn_f32_vmin_ukernel__wasmsimd_x86_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: 16 floats per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;

    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    const v128_t vb2 = wasm_v128_load(input_b + 8);
    const v128_t vb3 = wasm_v128_load(input_b + 12);
    input_b += 16;

    v128_t vacc0 = wasm_f32x4_pmin(va0, vb0);
    v128_t vacc1 = wasm_f32x4_pmin(va1, vb1);
    v128_t vacc2 = wasm_f32x4_pmin(va2, vb2);
    v128_t vacc3 = wasm_f32x4_pmin(va3, vb3);

    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
  }
  // Secondary loop: 4 floats per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;

    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;

    v128_t vacc = wasm_f32x4_pmin(va, vb);

    wasm_v128_store(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);

    v128_t vacc = wasm_f32x4_pmin(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
2,375
26
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-wasmsimd-x86-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__wasmsimd_x86_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; const v128_t vb = wasm_v128_load(input_b); input_b += 4; v128_t vacc = wasm_f32x4_pmin(va, vb); wasm_v128_store(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); const v128_t vb = wasm_v128_load(input_b); v128_t vacc = wasm_f32x4_pmin(va, vb); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vacc, 0); vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vacc, 0); } } }
1,489
23.032258
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmin-wasmsimd-x86-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmin_ukernel__wasmsimd_x86_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const v128_t va0 = wasm_v128_load(input_a); const v128_t va1 = wasm_v128_load(input_a + 4); input_a += 8; const v128_t vb0 = wasm_v128_load(input_b); const v128_t vb1 = wasm_v128_load(input_b + 4); input_b += 8; v128_t vacc0 = wasm_f32x4_pmin(va0, vb0); v128_t vacc1 = wasm_f32x4_pmin(va1, vb1); wasm_v128_store(output, vacc0); wasm_v128_store(output + 4, vacc1); output += 8; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; const v128_t vb = wasm_v128_load(input_b); input_b += 4; v128_t vacc = wasm_f32x4_pmin(va, vb); wasm_v128_store(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); const v128_t vb = wasm_v128_load(input_b); v128_t vacc = wasm_f32x4_pmin(va, vb); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vacc, 0); vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vacc, 0); } } }
1,986
23.8375
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-avx-x16.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-avx.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__avx_x16( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const __m256 vb = _mm256_broadcast_ss(input_b); for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) { __m256 vacc0 = _mm256_loadu_ps(input_a); __m256 vacc1 = _mm256_loadu_ps(input_a + 8); input_a += 16; vacc0 = _mm256_min_ps(vacc0, vb); vacc1 = _mm256_min_ps(vacc1, vb); _mm256_storeu_ps(output, vacc0); _mm256_storeu_ps(output + 8, vacc1); output += 16; } for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { __m256 vacc = _mm256_loadu_ps(input_a); input_a += 8; vacc = _mm256_min_ps(vacc, vb); _mm256_storeu_ps(output, vacc); output += 8; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(float)); assert(batch <= 7 * sizeof(float)); const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch)); __m256 vacc = _mm256_maskload_ps(input_a, vmask); vacc = _mm256_min_ps(vacc, vb); __m128 vacc_lo = _mm256_castps256_ps128(vacc); if (batch & (4 * sizeof(float))) { _mm_storeu_ps(output, vacc_lo); vacc_lo = _mm256_extractf128_ps(vacc, 1); output += 4; } if (batch & (2 * sizeof(float))) { _mm_storel_pi((__m64*) output, vacc_lo); vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo); output += 2; } if (batch & (1 * sizeof(float))) { _mm_store_ss(output, vacc_lo); } } }
2,131
25.65
112
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-avx-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-avx.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__avx_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const __m256 vb = _mm256_broadcast_ss(input_b); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { __m256 vacc = _mm256_loadu_ps(input_a); input_a += 8; vacc = _mm256_min_ps(vacc, vb); _mm256_storeu_ps(output, vacc); output += 8; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(float)); assert(batch <= 7 * sizeof(float)); const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch)); __m256 vacc = _mm256_maskload_ps(input_a, vmask); vacc = _mm256_min_ps(vacc, vb); __m128 vacc_lo = _mm256_castps256_ps128(vacc); if (batch & (4 * sizeof(float))) { _mm_storeu_ps(output, vacc_lo); vacc_lo = _mm256_extractf128_ps(vacc, 1); output += 4; } if (batch & (2 * sizeof(float))) { _mm_storel_pi((__m64*) output, vacc_lo); vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo); output += 2; } if (batch & (1 * sizeof(float))) { _mm_store_ss(output, vacc_lo); } } }
1,768
25.80303
112
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-avx512f-x16.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-avx512f.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__avx512f_x16( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const __m512 vb = _mm512_set1_ps(*input_b); for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) { __m512 vacc0 = _mm512_loadu_ps(input_a); input_a += 16; vacc0 = _mm512_min_ps(vacc0, vb); _mm512_storeu_ps(output, vacc0); output += 16; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(float)); assert(batch <= 15 * sizeof(float)); // Prepare mask for valid 32-bit elements (depends on batch). batch >>= XNN_LOG2_SIZEOF_FLOAT; const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1))); __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a); vacc = _mm512_maskz_min_ps(vmask, vacc, vb); _mm512_mask_storeu_ps(output, vmask, vacc); } }
1,541
26.052632
105
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-avx512f-x32.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-avx512f.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__avx512f_x32( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const __m512 vb = _mm512_set1_ps(*input_b); for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) { __m512 vacc0 = _mm512_loadu_ps(input_a); __m512 vacc1 = _mm512_loadu_ps(input_a + 16); input_a += 32; vacc0 = _mm512_min_ps(vacc0, vb); vacc1 = _mm512_min_ps(vacc1, vb); _mm512_storeu_ps(output, vacc0); _mm512_storeu_ps(output + 16, vacc1); output += 32; } for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) { __m512 vacc = _mm512_loadu_ps(input_a); input_a += 16; vacc = _mm512_min_ps(vacc, vb); _mm512_storeu_ps(output, vacc); output += 16; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(float)); assert(batch <= 15 * sizeof(float)); // Prepare mask for valid 32-bit elements (depends on batch). batch >>= XNN_LOG2_SIZEOF_FLOAT; const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1))); __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a); vacc = _mm512_maskz_min_ps(vmask, vacc, vb); _mm512_mask_storeu_ps(output, vmask, vacc); } }
1,899
26.536232
105
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-neon-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-neon.c.in // Generator: tools/xngen // // Copyright 2_lo9 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__neon_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float32x4_t vb = vld1q_dup_f32(input_b); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float32x4_t va = vld1q_f32(input_a); input_a += 4; float32x4_t vacc = vminq_f32(va, vb); vst1q_f32(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const float32x4_t va = vld1q_f32(input_a); float32x4_t vacc = vminq_f32(va, vb); float32x2_t vacc_lo = vget_low_f32(vacc); if (batch & (2 * sizeof(float))) { vst1_f32(output, vacc_lo); output += 2; vacc_lo = vget_high_f32(vacc); } if (batch & (1 * sizeof(float))) { vst1_lane_f32(output, vacc_lo, 0); } } }
1,400
24.472727
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-neon-x8.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vopc-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>

// f32 minimum against a broadcast scalar: output[i] = min(input_a[i], *input_b).
// `batch` is in bytes.
// NOTE: the accumulator names were text-corrupted ("vacc_"/"vaccl"); restored
// to the generator-conventional "vacc0"/"vacc1" used by all sibling kernels.
void xnn_f32_vminc_ukernel__neon_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Duplicate the scalar operand into all 4 lanes once, up front.
  const float32x4_t vb = vld1q_dup_f32(input_b);

  // Main loop: 8 floats (2 NEON vectors) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    float32x4_t vacc0 = vld1q_f32(input_a); input_a += 4;
    float32x4_t vacc1 = vld1q_f32(input_a); input_a += 4;

    vacc0 = vminq_f32(vacc0, vb);
    vacc1 = vminq_f32(vacc1, vb);

    vst1q_f32(output, vacc0); output += 4;
    vst1q_f32(output, vacc1); output += 4;
  }
  // Secondary loop: one NEON vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(input_a); input_a += 4;

    float32x4_t vacc = vminq_f32(va, vb);

    vst1q_f32(output, vacc); output += 4;
  }
  // Tail (1-3 floats): full-vector load may over-read (permitted by
  // XNN_OOB_READS); stores write 2 and/or 1 lanes as needed.
  if XNN_UNLIKELY(batch != 0) {
    const float32x4_t va = vld1q_f32(input_a);

    float32x4_t vacc = vminq_f32(va, vb);

    float32x2_t vacc_lo = vget_low_f32(vacc);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vacc_lo); output += 2;
      vacc_lo = vget_high_f32(vacc);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vacc_lo, 0);
    }
  }
}
1,745
25.059701
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-scalar-x1.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__scalar_x1( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= sizeof(float); batch -= sizeof(float)) { const float va = *input_a++; float vacc = math_min_f32(va, vb); *output++ = vacc; } }
924
23.342105
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-scalar-x2.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__scalar_x2( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; input_a += 2; float vacc0 = math_min_f32(va0, vb); float vacc1 = math_min_f32(va1, vb); output[0] = vacc0; output[1] = vacc1; output += 2; } if XNN_UNLIKELY(batch != 0) { assert(batch == sizeof(float)); const float va = *input_a; float vacc = math_min_f32(va, vb); *output = vacc; } }
1,235
22.320755
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-scalar-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__scalar_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; input_a += 4; float vacc0 = math_min_f32(va0, vb); float vacc1 = math_min_f32(va1, vb); float vacc2 = math_min_f32(va2, vb); float vacc3 = math_min_f32(va3, vb); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output += 4; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; float vacc = math_min_f32(va, vb); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
1,470
23.114754
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-scalar-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__scalar_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; const float va4 = input_a[4]; const float va5 = input_a[5]; const float va6 = input_a[6]; const float va7 = input_a[7]; input_a += 8; float vacc0 = math_min_f32(va0, vb); float vacc1 = math_min_f32(va1, vb); float vacc2 = math_min_f32(va2, vb); float vacc3 = math_min_f32(va3, vb); float vacc4 = math_min_f32(va4, vb); float vacc5 = math_min_f32(va5, vb); float vacc6 = math_min_f32(va6, vb); float vacc7 = math_min_f32(va7, vb); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output[4] = vacc4; output[5] = vacc5; output[6] = vacc6; output[7] = vacc7; output += 8; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; float vacc = math_min_f32(va, vb); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
1,862
24.520548
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-sse-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-sse.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xmmintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__sse_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const __m128 vb = _mm_load1_ps(input_b); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const __m128 va = _mm_loadu_ps(input_a); input_a += 4; __m128 vacc = _mm_min_ps(va, vb); _mm_storeu_ps(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const __m128 va = _mm_loadu_ps(input_a); __m128 vacc = _mm_min_ps(va, vb); if (batch & (2 * sizeof(float))) { _mm_storel_pi((__m64*) output, vacc); vacc = _mm_movehl_ps(vacc, vacc); output += 2; } if (batch & (1 * sizeof(float))) { _mm_store_ss(output, vacc); } } }
1,399
23.561404
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-sse-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-sse.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xmmintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__sse_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const __m128 vb = _mm_load1_ps(input_b); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const __m128 va0 = _mm_loadu_ps(input_a); const __m128 va1 = _mm_loadu_ps(input_a + 4); input_a += 8; __m128 vacc0 = _mm_min_ps(va0, vb); __m128 vacc1 = _mm_min_ps(va1, vb); _mm_storeu_ps(output, vacc0); _mm_storeu_ps(output + 4, vacc1); output += 8; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const __m128 va = _mm_loadu_ps(input_a); input_a += 4; __m128 vacc = _mm_min_ps(va, vb); _mm_storeu_ps(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const __m128 va = _mm_loadu_ps(input_a); __m128 vacc = _mm_min_ps(va, vb); if (batch & (2 * sizeof(float))) { _mm_storel_pi((__m64*) output, vacc); vacc = _mm_movehl_ps(vacc, vacc); output += 2; } if (batch & (1 * sizeof(float))) { _mm_store_ss(output, vacc); } } }
1,757
23.760563
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-wasm-x1.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__wasm_x1( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= sizeof(float); batch -= sizeof(float)) { const float va = *input_a++; float vacc = __builtin_wasm_min_f32(va, vb); *output++ = vacc; } }
932
23.552632
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-wasm-x2.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__wasm_x2( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; input_a += 2; float vacc0 = __builtin_wasm_min_f32(va0, vb); float vacc1 = __builtin_wasm_min_f32(va1, vb); output[0] = vacc0; output[1] = vacc1; output += 2; } if XNN_UNLIKELY(batch != 0) { assert(batch == sizeof(float)); const float va = *input_a; float vacc = __builtin_wasm_min_f32(va, vb); *output = vacc; } }
1,263
22.849057
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-wasm-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__wasm_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; input_a += 4; float vacc0 = __builtin_wasm_min_f32(va0, vb); float vacc1 = __builtin_wasm_min_f32(va1, vb); float vacc2 = __builtin_wasm_min_f32(va2, vb); float vacc3 = __builtin_wasm_min_f32(va3, vb); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output += 4; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; float vacc = __builtin_wasm_min_f32(va, vb); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
1,518
23.901639
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-wasm-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__wasm_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float vb = *input_b; for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; const float va4 = input_a[4]; const float va5 = input_a[5]; const float va6 = input_a[6]; const float va7 = input_a[7]; input_a += 8; float vacc0 = __builtin_wasm_min_f32(va0, vb); float vacc1 = __builtin_wasm_min_f32(va1, vb); float vacc2 = __builtin_wasm_min_f32(va2, vb); float vacc3 = __builtin_wasm_min_f32(va3, vb); float vacc4 = __builtin_wasm_min_f32(va4, vb); float vacc5 = __builtin_wasm_min_f32(va5, vb); float vacc6 = __builtin_wasm_min_f32(va6, vb); float vacc7 = __builtin_wasm_min_f32(va7, vb); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output[4] = vacc4; output[5] = vacc5; output[6] = vacc6; output[7] = vacc7; output += 8; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; float vacc = __builtin_wasm_min_f32(va, vb); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
1,950
25.726027
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-wasmsimd-arm-x16.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__wasmsimd_arm_x16( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vb = wasm_v128_load32_splat(input_b); for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) { const v128_t va0 = wasm_v128_load(input_a); const v128_t va1 = wasm_v128_load(input_a + 4); const v128_t va2 = wasm_v128_load(input_a + 8); const v128_t va3 = wasm_v128_load(input_a + 12); input_a += 16; v128_t vy0 = wasm_f32x4_min(va0, vb); v128_t vy1 = wasm_f32x4_min(va1, vb); v128_t vy2 = wasm_f32x4_min(va2, vb); v128_t vy3 = wasm_f32x4_min(va3, vb); wasm_v128_store(output, vy0); wasm_v128_store(output + 4, vy1); wasm_v128_store(output + 8, vy2); wasm_v128_store(output + 12, vy3); output += 16; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; v128_t vy = wasm_f32x4_min(va, vb); wasm_v128_store(output, vy); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); v128_t vy = wasm_f32x4_min(va, vb); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vy, 0); vy = wasm_v64x2_shuffle(vy, vy, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vy, 0); } } }
2,050
24.962025
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-wasmsimd-arm-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__wasmsimd_arm_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vb = wasm_v128_load32_splat(input_b); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; v128_t vy = wasm_f32x4_min(va, vb); wasm_v128_store(output, vy); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); v128_t vy = wasm_f32x4_min(va, vb); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vy, 0); vy = wasm_v64x2_shuffle(vy, vy, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vy, 0); } } }
1,413
22.966102
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-wasmsimd-arm-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__wasmsimd_arm_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vb = wasm_v128_load32_splat(input_b); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const v128_t va0 = wasm_v128_load(input_a); const v128_t va1 = wasm_v128_load(input_a + 4); input_a += 8; v128_t vy0 = wasm_f32x4_min(va0, vb); v128_t vy1 = wasm_f32x4_min(va1, vb); wasm_v128_store(output, vy0); wasm_v128_store(output + 4, vy1); output += 8; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; v128_t vy = wasm_f32x4_min(va, vb); wasm_v128_store(output, vy); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); v128_t vy = wasm_f32x4_min(va, vb); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vy, 0); vy = wasm_v64x2_shuffle(vy, vy, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vy, 0); } } }
1,779
23.383562
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-wasmsimd-x86-x16.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__wasmsimd_x86_x16( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vb = wasm_v128_load32_splat(input_b); for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) { const v128_t va0 = wasm_v128_load(input_a); const v128_t va1 = wasm_v128_load(input_a + 4); const v128_t va2 = wasm_v128_load(input_a + 8); const v128_t va3 = wasm_v128_load(input_a + 12); input_a += 16; v128_t vy0 = wasm_f32x4_pmin(vb, va0); v128_t vy1 = wasm_f32x4_pmin(vb, va1); v128_t vy2 = wasm_f32x4_pmin(vb, va2); v128_t vy3 = wasm_f32x4_pmin(vb, va3); wasm_v128_store(output, vy0); wasm_v128_store(output + 4, vy1); wasm_v128_store(output + 8, vy2); wasm_v128_store(output + 12, vy3); output += 16; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; v128_t vy = wasm_f32x4_pmin(vb, va); wasm_v128_store(output, vy); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); v128_t vy = wasm_f32x4_pmin(vb, va); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vy, 0); vy = wasm_v64x2_shuffle(vy, vy, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vy, 0); } } }
2,056
25.037975
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-wasmsimd-x86-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__wasmsimd_x86_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vb = wasm_v128_load32_splat(input_b); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; v128_t vy = wasm_f32x4_pmin(vb, va); wasm_v128_store(output, vy); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); v128_t vy = wasm_f32x4_pmin(vb, va); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vy, 0); vy = wasm_v64x2_shuffle(vy, vy, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vy, 0); } } }
1,415
23
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vminc-wasmsimd-x86-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vopc-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vminc_ukernel__wasmsimd_x86_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vb = wasm_v128_load32_splat(input_b); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const v128_t va0 = wasm_v128_load(input_a); const v128_t va1 = wasm_v128_load(input_a + 4); input_a += 8; v128_t vy0 = wasm_f32x4_pmin(vb, va0); v128_t vy1 = wasm_f32x4_pmin(vb, va1); wasm_v128_store(output, vy0); wasm_v128_store(output + 4, vy1); output += 8; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; v128_t vy = wasm_f32x4_pmin(vb, va); wasm_v128_store(output, vy); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); v128_t vy = wasm_f32x4_pmin(vb, va); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vy, 0); vy = wasm_v64x2_shuffle(vy, vy, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vy, 0); } } }
1,783
23.438356
90
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-avx-x16.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-avx.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>


// f32 element-wise multiply with output clamping:
//   output[i] = clamp(input_a[i] * input_b[i], params->avx.min, params->avx.max)
// AVX variant, unrolled to 16 floats (two 256-bit vectors) per main-loop
// iteration. `batch` is in BYTES, non-zero, multiple of sizeof(float).
// The tail uses VMASKMOVPS loads, so no out-of-bounds reads occur.
void xnn_f32_vmul_minmax_ukernel__avx_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Clamp bounds, pre-broadcast into all 8 lanes by the params initializer.
  const __m256 voutput_min = _mm256_load_ps(params->avx.min);
  const __m256 voutput_max = _mm256_load_ps(params->avx.max);

  // Main loop: two independent 8-lane vectors (16 floats) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m256 vacc0 = _mm256_loadu_ps(input_a);
    __m256 vacc1 = _mm256_loadu_ps(input_a + 8);
    input_a += 16;

    vacc0 = _mm256_mul_ps(vacc0, _mm256_loadu_ps(input_b));
    vacc1 = _mm256_mul_ps(vacc1, _mm256_loadu_ps(input_b + 8));
    input_b += 16;

    // Clamp: max with lower bound first, then min with upper bound.
    vacc0 = _mm256_max_ps(voutput_min, vacc0);
    vacc1 = _mm256_max_ps(voutput_min, vacc1);

    vacc0 = _mm256_min_ps(voutput_max, vacc0);
    vacc1 = _mm256_min_ps(voutput_max, vacc1);

    _mm256_storeu_ps(output, vacc0);
    _mm256_storeu_ps(output + 8, vacc1);
    output += 16;
  }
  // Secondary loop: one 8-float vector at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;

    vacc = _mm256_mul_ps(vacc, _mm256_loadu_ps(input_b));
    input_b += 8;

    vacc = _mm256_max_ps(voutput_min, vacc);
    vacc = _mm256_min_ps(voutput_max, vacc);

    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  // Tail: 1-7 remaining floats, handled with masked loads.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Slide a window over the sentinel mask table so that exactly
    // batch/sizeof(float) leading lanes are enabled.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));

    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    const __m256 vb = _mm256_maskload_ps(input_b, vmask);

    vacc = _mm256_mul_ps(vacc, vb);
    vacc = _mm256_max_ps(voutput_min, vacc);
    vacc = _mm256_min_ps(voutput_max, vacc);

    // Store the valid lanes in 4/2/1-float pieces.
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);  // upper half becomes current
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);  // lanes 2-3 down to 0-1
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
2,743
28.505376
112
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-avx-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-avx.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_minmax_ukernel__avx_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const __m256 voutput_min = _mm256_load_ps(params->avx.min); const __m256 voutput_max = _mm256_load_ps(params->avx.max); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { __m256 vacc = _mm256_loadu_ps(input_a); input_a += 8; vacc = _mm256_mul_ps(vacc, _mm256_loadu_ps(input_b)); input_b += 8; vacc = _mm256_max_ps(voutput_min, vacc); vacc = _mm256_min_ps(voutput_max, vacc); _mm256_storeu_ps(output, vacc); output += 8; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(float)); assert(batch <= 7 * sizeof(float)); const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch)); __m256 vacc = _mm256_maskload_ps(input_a, vmask); const __m256 vb = _mm256_maskload_ps(input_b, vmask); vacc = _mm256_mul_ps(vacc, vb); vacc = _mm256_max_ps(voutput_min, vacc); vacc = _mm256_min_ps(voutput_max, vacc); __m128 vacc_lo = _mm256_castps256_ps128(vacc); if (batch & (4 * sizeof(float))) { _mm_storeu_ps(output, vacc_lo); vacc_lo = _mm256_extractf128_ps(vacc, 1); output += 4; } if (batch & (2 * sizeof(float))) { _mm_storel_pi((__m64*) output, vacc_lo); vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo); output += 2; } if (batch & (1 * sizeof(float))) { _mm_store_ss(output, vacc_lo); } } }
2,124
28.109589
112
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-avx512f-x16.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-avx512f.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_minmax_ukernel__avx512f_x16( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const __m512 voutput_min = _mm512_set1_ps(params->scalar.min); const __m512 voutput_max = _mm512_set1_ps(params->scalar.max); for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) { __m512 vacc = _mm512_loadu_ps(input_a); input_a += 16; vacc = _mm512_mul_ps(vacc, _mm512_loadu_ps(input_b)); input_b += 16; vacc = _mm512_max_ps(voutput_min, vacc); vacc = _mm512_min_ps(voutput_max, vacc); _mm512_storeu_ps(output, vacc); output += 16; } if XNN_UNLIKELY(batch != 0) { assert(batch >= 1 * sizeof(float)); assert(batch <= 15 * sizeof(float)); // Prepare mask for valid 32-bit elements (depends on batch). batch >>= XNN_LOG2_SIZEOF_FLOAT; const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1))); __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a); vacc = _mm512_maskz_mul_ps(vmask, vacc, _mm512_maskz_loadu_ps(vmask, input_b)); vacc = _mm512_maskz_max_ps(vmask, voutput_min, vacc); vacc = _mm512_maskz_min_ps(vmask, voutput_max, vacc); _mm512_mask_storeu_ps(output, vmask, vacc); } }
1,906
29.758065
105
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-avx512f-x32.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-avx512f.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>


// f32 element-wise multiply with output clamping:
//   output[i] = clamp(input_a[i] * input_b[i], params->scalar.min, params->scalar.max)
// AVX-512F variant, unrolled to 32 floats (two 512-bit vectors) per
// main-loop iteration. `batch` is in BYTES, non-zero, multiple of
// sizeof(float). The tail is handled entirely with AVX-512 lane masking,
// so no out-of-bounds access.
void xnn_f32_vmul_minmax_ukernel__avx512f_x32(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Broadcast the clamp bounds to all 16 lanes.
  const __m512 voutput_min = _mm512_set1_ps(params->scalar.min);
  const __m512 voutput_max = _mm512_set1_ps(params->scalar.max);

  // Main loop: two independent 16-lane vectors (32 floats) per iteration.
  for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
    __m512 vacc0 = _mm512_loadu_ps(input_a);
    __m512 vacc1 = _mm512_loadu_ps(input_a + 16);
    input_a += 32;

    vacc0 = _mm512_mul_ps(vacc0, _mm512_loadu_ps(input_b));
    vacc1 = _mm512_mul_ps(vacc1, _mm512_loadu_ps(input_b + 16));
    input_b += 32;

    // Clamp: max with lower bound first, then min with upper bound.
    vacc0 = _mm512_max_ps(voutput_min, vacc0);
    vacc1 = _mm512_max_ps(voutput_min, vacc1);

    vacc0 = _mm512_min_ps(voutput_max, vacc0);
    vacc1 = _mm512_min_ps(voutput_max, vacc1);

    _mm512_storeu_ps(output, vacc0);
    _mm512_storeu_ps(output + 16, vacc1);
    output += 32;
  }
  // Secondary loop: one 16-float vector at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m512 vacc = _mm512_loadu_ps(input_a);
    input_a += 16;

    vacc = _mm512_mul_ps(vacc, _mm512_loadu_ps(input_b));
    input_b += 16;

    vacc = _mm512_max_ps(voutput_min, vacc);
    vacc = _mm512_min_ps(voutput_max, vacc);

    _mm512_storeu_ps(output, vacc);
    output += 16;
  }
  // Tail: 1-15 remaining floats, computed and stored under a lane mask.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;  // bytes -> element count
    // Enable the low `batch` lanes only.
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
    vacc = _mm512_maskz_mul_ps(vmask, vacc, _mm512_maskz_loadu_ps(vmask, input_b));
    vacc = _mm512_maskz_max_ps(vmask, voutput_min, vacc);
    vacc = _mm512_maskz_min_ps(vmask, voutput_max, vacc);
    _mm512_mask_storeu_ps(output, vmask, vacc);
  }
}
2,527
29.829268
105
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-neon-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-neon.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_minmax_ukernel__neon_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min); const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float32x4_t va = vld1q_f32(input_a); input_a += 4; const float32x4_t vb = vld1q_f32(input_b); input_b += 4; float32x4_t vacc = vmulq_f32(va, vb); vacc = vmaxq_f32(vacc, voutput_min); vacc = vminq_f32(vacc, voutput_max); vst1q_f32(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const float32x4_t va = vld1q_f32(input_a); const float32x4_t vb = vld1q_f32(input_b); float32x4_t vacc = vmulq_f32(va, vb); vacc = vmaxq_f32(vacc, voutput_min); vacc = vminq_f32(vacc, voutput_max); float32x2_t vacc_lo = vget_low_f32(vacc); if (batch & (2 * sizeof(float))) { vst1_f32(output, vacc_lo); output += 2; vacc_lo = vget_high_f32(vacc); } if (batch & (1 * sizeof(float))) { vst1_lane_f32(output, vacc_lo, 0); } } }
1,766
27.5
89
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-neon-x8.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>


// f32 element-wise multiply with output clamping:
//   output[i] = clamp(input_a[i] * input_b[i], params->scalar.min, params->scalar.max)
// NEON variant, unrolled to 8 floats (two 128-bit vectors) per main-loop
// iteration. `batch` is in BYTES, non-zero, multiple of sizeof(float).
// XNN_OOB_READS: the tail loads full vectors past the ends of
// input_a/input_b; the caller guarantees such over-reads are safe.
void xnn_f32_vmul_minmax_ukernel__neon_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Broadcast the clamp bounds to all 4 lanes.
  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);

  // Main loop: two independent 4-lane vectors (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t va0 = vld1q_f32(input_a);
    input_a += 4;
    const float32x4_t vb0 = vld1q_f32(input_b);
    input_b += 4;
    const float32x4_t va1 = vld1q_f32(input_a);
    input_a += 4;
    const float32x4_t vb1 = vld1q_f32(input_b);
    input_b += 4;

    float32x4_t vacc0 = vmulq_f32(va0, vb0);
    float32x4_t vacc1 = vmulq_f32(va1, vb1);

    // Clamp: max with lower bound first, then min with upper bound.
    vacc0 = vmaxq_f32(vacc0, voutput_min);
    vacc1 = vmaxq_f32(vacc1, voutput_min);

    vacc0 = vminq_f32(vacc0, voutput_max);
    vacc1 = vminq_f32(vacc1, voutput_max);

    vst1q_f32(output, vacc0);
    output += 4;
    vst1q_f32(output, vacc1);
    output += 4;
  }
  // Secondary loop: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(input_a);
    input_a += 4;
    const float32x4_t vb = vld1q_f32(input_b);
    input_b += 4;

    float32x4_t vacc = vmulq_f32(va, vb);
    vacc = vmaxq_f32(vacc, voutput_min);
    vacc = vminq_f32(vacc, voutput_max);

    vst1q_f32(output, vacc);
    output += 4;
  }
  // Tail: 1-3 remaining floats. Compute on full (partially out-of-bounds)
  // vectors, then store only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    const float32x4_t va = vld1q_f32(input_a);
    const float32x4_t vb = vld1q_f32(input_b);

    float32x4_t vacc = vmulq_f32(va, vb);
    vacc = vmaxq_f32(vacc, voutput_min);
    vacc = vminq_f32(vacc, voutput_max);

    float32x2_t vacc_lo = vget_low_f32(vacc);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vacc_lo);
      output += 2;
      vacc_lo = vget_high_f32(vacc);  // remaining element is in the high half
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vacc_lo, 0);
    }
  }
}
2,438
29.111111
89
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-scalar-x1.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_minmax_ukernel__scalar_x1( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float voutput_min = params->scalar.min; const float voutput_max = params->scalar.max; for (; batch >= sizeof(float); batch -= sizeof(float)) { const float va = *input_a++; const float vb = *input_b++; float vacc = va * vb; vacc = math_max_f32(vacc, voutput_min); vacc = math_min_f32(vacc, voutput_max); *output++ = vacc; } }
1,103
25.285714
75
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-scalar-x2.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_minmax_ukernel__scalar_x2( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float voutput_min = params->scalar.min; const float voutput_max = params->scalar.max; for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; input_a += 2; const float vb0 = input_b[0]; const float vb1 = input_b[1]; input_b += 2; float vacc0 = va0 * vb0; float vacc1 = va1 * vb1; vacc0 = math_max_f32(vacc0, voutput_min); vacc1 = math_max_f32(vacc1, voutput_min); vacc0 = math_min_f32(vacc0, voutput_max); vacc1 = math_min_f32(vacc1, voutput_max); output[0] = vacc0; output[1] = vacc1; output += 2; } if XNN_UNLIKELY(batch != 0) { assert(batch == sizeof(float)); const float va = *input_a; const float vb = *input_b; float vacc = va * vb; vacc = math_max_f32(vacc, voutput_min); vacc = math_min_f32(vacc, voutput_max); *output = vacc; } }
1,660
24.166667
75
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-scalar-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_minmax_ukernel__scalar_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float voutput_min = params->scalar.min; const float voutput_max = params->scalar.max; for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; input_a += 4; const float vb0 = input_b[0]; const float vb1 = input_b[1]; const float vb2 = input_b[2]; const float vb3 = input_b[3]; input_b += 4; float vacc0 = va0 * vb0; float vacc1 = va1 * vb1; float vacc2 = va2 * vb2; float vacc3 = va3 * vb3; vacc0 = math_max_f32(vacc0, voutput_min); vacc1 = math_max_f32(vacc1, voutput_min); vacc2 = math_max_f32(vacc2, voutput_min); vacc3 = math_max_f32(vacc3, voutput_min); vacc0 = math_min_f32(vacc0, voutput_max); vacc1 = math_min_f32(vacc1, voutput_max); vacc2 = math_min_f32(vacc2, voutput_max); vacc3 = math_min_f32(vacc3, voutput_max); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output += 4; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; const float vb = *input_b++; float vacc = va * vb; vacc = math_max_f32(vacc, voutput_min); vacc = math_min_f32(vacc, voutput_max); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
2,131
25.65
75
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-scalar-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_minmax_ukernel__scalar_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float voutput_min = params->scalar.min; const float voutput_max = params->scalar.max; for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; const float va4 = input_a[4]; const float va5 = input_a[5]; const float va6 = input_a[6]; const float va7 = input_a[7]; input_a += 8; const float vb0 = input_b[0]; const float vb1 = input_b[1]; const float vb2 = input_b[2]; const float vb3 = input_b[3]; const float vb4 = input_b[4]; const float vb5 = input_b[5]; const float vb6 = input_b[6]; const float vb7 = input_b[7]; input_b += 8; float vacc0 = va0 * vb0; float vacc1 = va1 * vb1; float vacc2 = va2 * vb2; float vacc3 = va3 * vb3; float vacc4 = va4 * vb4; float vacc5 = va5 * vb5; float vacc6 = va6 * vb6; float vacc7 = va7 * vb7; vacc0 = math_max_f32(vacc0, voutput_min); vacc1 = math_max_f32(vacc1, voutput_min); vacc2 = math_max_f32(vacc2, voutput_min); vacc3 = math_max_f32(vacc3, voutput_min); vacc4 = math_max_f32(vacc4, voutput_min); vacc5 = math_max_f32(vacc5, voutput_min); vacc6 = math_max_f32(vacc6, voutput_min); vacc7 = math_max_f32(vacc7, voutput_min); vacc0 = math_min_f32(vacc0, voutput_max); vacc1 = math_min_f32(vacc1, voutput_max); vacc2 = 
math_min_f32(vacc2, voutput_max); vacc3 = math_min_f32(vacc3, voutput_max); vacc4 = math_min_f32(vacc4, voutput_max); vacc5 = math_min_f32(vacc5, voutput_max); vacc6 = math_min_f32(vacc6, voutput_max); vacc7 = math_min_f32(vacc7, voutput_max); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output[4] = vacc4; output[5] = vacc5; output[6] = vacc6; output[7] = vacc7; output += 8; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; const float vb = *input_b++; float vacc = va * vb; vacc = math_max_f32(vacc, voutput_min); vacc = math_min_f32(vacc, voutput_max); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
2,979
27.653846
75
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-sse-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-sse.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xmmintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_minmax_ukernel__sse_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const __m128 voutput_min = _mm_load_ps(params->sse.min); const __m128 voutput_max = _mm_load_ps(params->sse.max); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const __m128 va = _mm_loadu_ps(input_a); input_a += 4; const __m128 vb = _mm_loadu_ps(input_b); input_b += 4; __m128 vacc = _mm_mul_ps(va, vb); vacc = _mm_max_ps(vacc, voutput_min); vacc = _mm_min_ps(vacc, voutput_max); _mm_storeu_ps(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const __m128 va = _mm_loadu_ps(input_a); const __m128 vb = _mm_loadu_ps(input_b); __m128 vacc = _mm_mul_ps(va, vb); vacc = _mm_max_ps(vacc, voutput_min); vacc = _mm_min_ps(vacc, voutput_max); if (batch & (2 * sizeof(float))) { _mm_storel_pi((__m64*) output, vacc); vacc = _mm_movehl_ps(vacc, vacc); output += 2; } if (batch & (1 * sizeof(float))) { _mm_store_ss(output, vacc); } } }
1,756
25.223881
89
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-sse-x8.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>


// f32 element-wise multiply with output clamping:
//   output[i] = clamp(input_a[i] * input_b[i], params->sse.min, params->sse.max)
// SSE variant, unrolled to 8 floats (two 128-bit vectors) per main-loop
// iteration. `batch` is in BYTES, non-zero, multiple of sizeof(float).
// XNN_OOB_READS: the tail loads full vectors past the ends of
// input_a/input_b; the caller guarantees such over-reads are safe.
void xnn_f32_vmul_minmax_ukernel__sse_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Clamp bounds, pre-broadcast into all 4 lanes by the params initializer.
  const __m128 voutput_min = _mm_load_ps(params->sse.min);
  const __m128 voutput_max = _mm_load_ps(params->sse.max);

  // Main loop: two independent 4-lane vectors (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m128 va0 = _mm_loadu_ps(input_a);
    const __m128 va1 = _mm_loadu_ps(input_a + 4);
    input_a += 8;

    const __m128 vb0 = _mm_loadu_ps(input_b);
    const __m128 vb1 = _mm_loadu_ps(input_b + 4);
    input_b += 8;

    __m128 vacc0 = _mm_mul_ps(va0, vb0);
    __m128 vacc1 = _mm_mul_ps(va1, vb1);

    // Clamp: max with lower bound first, then min with upper bound.
    vacc0 = _mm_max_ps(vacc0, voutput_min);
    vacc1 = _mm_max_ps(vacc1, voutput_min);

    vacc0 = _mm_min_ps(vacc0, voutput_max);
    vacc1 = _mm_min_ps(vacc1, voutput_max);

    _mm_storeu_ps(output, vacc0);
    _mm_storeu_ps(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 va = _mm_loadu_ps(input_a);
    input_a += 4;
    const __m128 vb = _mm_loadu_ps(input_b);
    input_b += 4;

    __m128 vacc = _mm_mul_ps(va, vb);
    vacc = _mm_max_ps(vacc, voutput_min);
    vacc = _mm_min_ps(vacc, voutput_max);

    _mm_storeu_ps(output, vacc);
    output += 4;
  }
  // Tail: 1-3 remaining floats. Compute on full (partially out-of-bounds)
  // vectors, then store only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    const __m128 va = _mm_loadu_ps(input_a);
    const __m128 vb = _mm_loadu_ps(input_b);

    __m128 vacc = _mm_mul_ps(va, vb);
    vacc = _mm_max_ps(vacc, voutput_min);
    vacc = _mm_min_ps(vacc, voutput_max);

    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc);
      vacc = _mm_movehl_ps(vacc, vacc);  // lanes 2-3 down to 0-1
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc);
    }
  }
}
2,408
25.766667
89
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-wasm-x1.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_minmax_ukernel__wasm_x1( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float voutput_min = params->scalar.min; const float voutput_max = params->scalar.max; for (; batch >= sizeof(float); batch -= sizeof(float)) { const float va = *input_a++; const float vb = *input_b++; float vacc = va * vb; vacc = __builtin_wasm_max_f32(vacc, voutput_min); vacc = __builtin_wasm_min_f32(vacc, voutput_max); *output++ = vacc; } }
1,121
25.714286
75
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-wasm-x2.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_minmax_ukernel__wasm_x2( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const float voutput_min = params->scalar.min; const float voutput_max = params->scalar.max; for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; input_a += 2; const float vb0 = input_b[0]; const float vb1 = input_b[1]; input_b += 2; float vacc0 = va0 * vb0; float vacc1 = va1 * vb1; vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min); vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min); vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max); vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max); output[0] = vacc0; output[1] = vacc1; output += 2; } if XNN_UNLIKELY(batch != 0) { assert(batch == sizeof(float)); const float va = *input_a; const float vb = *input_b; float vacc = va * vb; vacc = __builtin_wasm_max_f32(vacc, voutput_min); vacc = __builtin_wasm_min_f32(vacc, voutput_max); *output = vacc; } }
1,718
25.045455
75
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-wasm-x4.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>


// Scalar WAsm micro-kernel: output[i] = clamp(a[i] * b[i], min, max),
// main loop unrolled 4x with a one-element-at-a-time tail loop.
// `batch` is a byte count (non-zero multiple of sizeof(float)).
void xnn_f32_vmul_minmax_ukernel__wasm_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;

  // Main loop: four elements per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    input_a += 4;

    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    const float vb2 = input_b[2];
    const float vb3 = input_b[3];
    input_b += 4;

    float vacc0 = va0 * vb0;
    float vacc1 = va1 * vb1;
    float vacc2 = va2 * vb2;
    float vacc3 = va3 * vb3;

    vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
    vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
    vacc2 = __builtin_wasm_max_f32(vacc2, voutput_min);
    vacc3 = __builtin_wasm_max_f32(vacc3, voutput_min);

    vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
    vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
    vacc2 = __builtin_wasm_min_f32(vacc2, voutput_max);
    vacc3 = __builtin_wasm_min_f32(vacc3, voutput_max);

    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output += 4;
  }
  // Tail: 1-3 remaining elements, handled scalar-by-scalar.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      const float vb = *input_b++;
      float vacc = va * vb;
      vacc = __builtin_wasm_max_f32(vacc, voutput_min);
      vacc = __builtin_wasm_min_f32(vacc, voutput_max);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
2,229
26.875
75
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-wasm-x8.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>


// Scalar WAsm micro-kernel: output[i] = clamp(a[i] * b[i], min, max),
// main loop unrolled 8x with a one-element-at-a-time tail loop.
// `batch` is a byte count (non-zero multiple of sizeof(float)).
void xnn_f32_vmul_minmax_ukernel__wasm_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;

  // Main loop: eight elements per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    const float va4 = input_a[4];
    const float va5 = input_a[5];
    const float va6 = input_a[6];
    const float va7 = input_a[7];
    input_a += 8;

    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    const float vb2 = input_b[2];
    const float vb3 = input_b[3];
    const float vb4 = input_b[4];
    const float vb5 = input_b[5];
    const float vb6 = input_b[6];
    const float vb7 = input_b[7];
    input_b += 8;

    float vacc0 = va0 * vb0;
    float vacc1 = va1 * vb1;
    float vacc2 = va2 * vb2;
    float vacc3 = va3 * vb3;
    float vacc4 = va4 * vb4;
    float vacc5 = va5 * vb5;
    float vacc6 = va6 * vb6;
    float vacc7 = va7 * vb7;

    vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
    vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
    vacc2 = __builtin_wasm_max_f32(vacc2, voutput_min);
    vacc3 = __builtin_wasm_max_f32(vacc3, voutput_min);
    vacc4 = __builtin_wasm_max_f32(vacc4, voutput_min);
    vacc5 = __builtin_wasm_max_f32(vacc5, voutput_min);
    vacc6 = __builtin_wasm_max_f32(vacc6, voutput_min);
    vacc7 = __builtin_wasm_max_f32(vacc7, voutput_min);

    vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
    vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
    vacc2 = __builtin_wasm_min_f32(vacc2, voutput_max);
    vacc3 = __builtin_wasm_min_f32(vacc3, voutput_max);
    vacc4 = __builtin_wasm_min_f32(vacc4, voutput_max);
    vacc5 = __builtin_wasm_min_f32(vacc5, voutput_max);
    vacc6 = __builtin_wasm_min_f32(vacc6, voutput_max);
    vacc7 = __builtin_wasm_min_f32(vacc7, voutput_max);

    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output[4] = vacc4;
    output[5] = vacc5;
    output[6] = vacc6;
    output[7] = vacc7;
    output += 8;
  }
  // Tail: 1-7 remaining elements, handled scalar-by-scalar.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      const float vb = *input_b++;
      float vacc = va * vb;
      vacc = __builtin_wasm_max_f32(vacc, voutput_min);
      vacc = __builtin_wasm_min_f32(vacc, voutput_max);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
3,157
29.365385
75
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-wasmsimd-arm-x16.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>


// WAsm SIMD micro-kernel (ARM-lowering-friendly min/max): out = clamp(a * b),
// main loop unrolled to 16 floats (four 128-bit vectors) per iteration.
// `batch` is a byte count (non-zero multiple of sizeof(float)).
//
// XNN_OOB_READS: the tail path loads full 4-float vectors even when 1-3
// elements remain; only in-bounds lanes are stored.
void xnn_f32_vmul_minmax_ukernel__wasmsimd_arm_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);

  // Main loop: 16 elements per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;

    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    const v128_t vb2 = wasm_v128_load(input_b + 8);
    const v128_t vb3 = wasm_v128_load(input_b + 12);
    input_b += 16;

    v128_t vacc0 = wasm_f32x4_mul(va0, vb0);
    v128_t vacc1 = wasm_f32x4_mul(va1, vb1);
    v128_t vacc2 = wasm_f32x4_mul(va2, vb2);
    v128_t vacc3 = wasm_f32x4_mul(va3, vb3);

    vacc0 = wasm_f32x4_max(vacc0, voutput_min);
    vacc1 = wasm_f32x4_max(vacc1, voutput_min);
    vacc2 = wasm_f32x4_max(vacc2, voutput_min);
    vacc3 = wasm_f32x4_max(vacc3, voutput_min);

    vacc0 = wasm_f32x4_min(vacc0, voutput_max);
    vacc1 = wasm_f32x4_min(vacc1, voutput_max);
    vacc2 = wasm_f32x4_min(vacc2, voutput_max);
    vacc3 = wasm_f32x4_min(vacc3, voutput_max);

    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
  }
  // Remainder loop: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;

    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;

    v128_t vacc = wasm_f32x4_mul(va, vb);
    vacc = wasm_f32x4_max(vacc, voutput_min);
    vacc = wasm_f32x4_min(vacc, voutput_max);

    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail: 1-3 elements. Full-width load/compute, partial lane stores.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);

    v128_t vacc = wasm_f32x4_mul(va, vb);
    vacc = wasm_f32x4_max(vacc, voutput_min);
    vacc = wasm_f32x4_min(vacc, voutput_max);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      // Shift the upper 64 bits down for the possible single-lane store.
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
3,094
29.048544
89
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-wasmsimd-arm-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_minmax_ukernel__wasmsimd_arm_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; const v128_t vb = wasm_v128_load(input_b); input_b += 4; v128_t vacc = wasm_f32x4_mul(va, vb); vacc = wasm_f32x4_max(vacc, voutput_min); vacc = wasm_f32x4_min(vacc, voutput_max); wasm_v128_store(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); const v128_t vb = wasm_v128_load(input_b); v128_t vacc = wasm_f32x4_mul(va, vb); vacc = wasm_f32x4_max(vacc, voutput_min); vacc = wasm_f32x4_min(vacc, voutput_max); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vacc, 0); vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vacc, 0); } } }
1,827
25.882353
89
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-wasmsimd-arm-x8.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>


// WAsm SIMD micro-kernel (ARM-lowering-friendly min/max): out = clamp(a * b),
// main loop unrolled to 8 floats (two 128-bit vectors) per iteration.
// `batch` is a byte count (non-zero multiple of sizeof(float)).
//
// XNN_OOB_READS: the tail path loads full 4-float vectors even when 1-3
// elements remain; only in-bounds lanes are stored.
void xnn_f32_vmul_minmax_ukernel__wasmsimd_arm_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);

  // Main loop: eight elements per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;

    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    input_b += 8;

    v128_t vacc0 = wasm_f32x4_mul(va0, vb0);
    v128_t vacc1 = wasm_f32x4_mul(va1, vb1);

    vacc0 = wasm_f32x4_max(vacc0, voutput_min);
    vacc1 = wasm_f32x4_max(vacc1, voutput_min);

    vacc0 = wasm_f32x4_min(vacc0, voutput_max);
    vacc1 = wasm_f32x4_min(vacc1, voutput_max);

    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    output += 8;
  }
  // Remainder loop: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;

    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;

    v128_t vacc = wasm_f32x4_mul(va, vb);
    vacc = wasm_f32x4_max(vacc, voutput_min);
    vacc = wasm_f32x4_min(vacc, voutput_max);

    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail: 1-3 elements. Full-width load/compute, partial lane stores.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);

    v128_t vacc = wasm_f32x4_mul(va, vb);
    vacc = wasm_f32x4_max(vacc, voutput_min);
    vacc = wasm_f32x4_min(vacc, voutput_max);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
2,515
26.648352
89
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-wasmsimd-x86-x16.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>


// WAsm SIMD micro-kernel (x86-lowering-friendly pmin/pmax): out = clamp(a * b),
// main loop unrolled to 16 floats (four 128-bit vectors) per iteration.
// `batch` is a byte count (non-zero multiple of sizeof(float)).
//
// Note the operand order: pmax/pmin take the bound first and the accumulator
// second, matching the x86 MAXPS/MINPS second-operand-wins semantics.
//
// XNN_OOB_READS: the tail path loads full 4-float vectors even when 1-3
// elements remain; only in-bounds lanes are stored.
void xnn_f32_vmul_minmax_ukernel__wasmsimd_x86_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);

  // Main loop: 16 elements per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;

    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    const v128_t vb2 = wasm_v128_load(input_b + 8);
    const v128_t vb3 = wasm_v128_load(input_b + 12);
    input_b += 16;

    v128_t vacc0 = wasm_f32x4_mul(va0, vb0);
    v128_t vacc1 = wasm_f32x4_mul(va1, vb1);
    v128_t vacc2 = wasm_f32x4_mul(va2, vb2);
    v128_t vacc3 = wasm_f32x4_mul(va3, vb3);

    vacc0 = wasm_f32x4_pmax(voutput_min, vacc0);
    vacc1 = wasm_f32x4_pmax(voutput_min, vacc1);
    vacc2 = wasm_f32x4_pmax(voutput_min, vacc2);
    vacc3 = wasm_f32x4_pmax(voutput_min, vacc3);

    vacc0 = wasm_f32x4_pmin(voutput_max, vacc0);
    vacc1 = wasm_f32x4_pmin(voutput_max, vacc1);
    vacc2 = wasm_f32x4_pmin(voutput_max, vacc2);
    vacc3 = wasm_f32x4_pmin(voutput_max, vacc3);

    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
  }
  // Remainder loop: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;

    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;

    v128_t vacc = wasm_f32x4_mul(va, vb);
    vacc = wasm_f32x4_pmax(voutput_min, vacc);
    vacc = wasm_f32x4_pmin(voutput_max, vacc);

    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail: 1-3 elements. Full-width load/compute, partial lane stores.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);

    v128_t vacc = wasm_f32x4_mul(va, vb);
    vacc = wasm_f32x4_pmax(voutput_min, vacc);
    vacc = wasm_f32x4_pmin(voutput_max, vacc);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
3,106
29.165049
89
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-wasmsimd-x86-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_minmax_ukernel__wasmsimd_x86_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min); const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; const v128_t vb = wasm_v128_load(input_b); input_b += 4; v128_t vacc = wasm_f32x4_mul(va, vb); vacc = wasm_f32x4_pmax(voutput_min, vacc); vacc = wasm_f32x4_pmin(voutput_max, vacc); wasm_v128_store(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); const v128_t vb = wasm_v128_load(input_b); v128_t vacc = wasm_f32x4_mul(va, vb); vacc = wasm_f32x4_pmax(voutput_min, vacc); vacc = wasm_f32x4_pmin(voutput_max, vacc); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vacc, 0); vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vacc, 0); } } }
1,831
25.941176
89
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-minmax-wasmsimd-x86-x8.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>


// WAsm SIMD micro-kernel (x86-lowering-friendly pmin/pmax): out = clamp(a * b),
// main loop unrolled to 8 floats (two 128-bit vectors) per iteration.
// `batch` is a byte count (non-zero multiple of sizeof(float)).
// pmax/pmin take the bound as the first operand.
//
// XNN_OOB_READS: the tail path loads full 4-float vectors even when 1-3
// elements remain; only in-bounds lanes are stored.
void xnn_f32_vmul_minmax_ukernel__wasmsimd_x86_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);

  // Main loop: eight elements per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;

    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    input_b += 8;

    v128_t vacc0 = wasm_f32x4_mul(va0, vb0);
    v128_t vacc1 = wasm_f32x4_mul(va1, vb1);

    vacc0 = wasm_f32x4_pmax(voutput_min, vacc0);
    vacc1 = wasm_f32x4_pmax(voutput_min, vacc1);

    vacc0 = wasm_f32x4_pmin(voutput_max, vacc0);
    vacc1 = wasm_f32x4_pmin(voutput_max, vacc1);

    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    output += 8;
  }
  // Remainder loop: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;

    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;

    v128_t vacc = wasm_f32x4_mul(va, vb);
    vacc = wasm_f32x4_pmax(voutput_min, vacc);
    vacc = wasm_f32x4_pmin(voutput_max, vacc);

    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Tail: 1-3 elements. Full-width load/compute, partial lane stores.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);

    v128_t vacc = wasm_f32x4_mul(va, vb);
    vacc = wasm_f32x4_pmax(voutput_min, vacc);
    vacc = wasm_f32x4_pmin(voutput_max, vacc);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
2,523
26.736264
89
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-relu-scalar-x1.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_relu_ukernel__scalar_x1( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= sizeof(float); batch -= sizeof(float)) { const float va = *input_a++; const float vb = *input_b++; float vacc = va * vb; vacc = math_max_f32(vacc, 0.0f); *output++ = vacc; } }
952
23.435897
73
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-relu-scalar-x2.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_relu_ukernel__scalar_x2( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; input_a += 2; const float vb0 = input_b[0]; const float vb1 = input_b[1]; input_b += 2; float vacc0 = va0 * vb0; float vacc1 = va1 * vb1; vacc0 = math_max_f32(vacc0, 0.0f); vacc1 = math_max_f32(vacc1, 0.0f); output[0] = vacc0; output[1] = vacc1; output += 2; } if XNN_UNLIKELY(batch != 0) { assert(batch == sizeof(float)); const float va = *input_a; const float vb = *input_b; float vacc = va * vb; vacc = math_max_f32(vacc, 0.0f); *output = vacc; } }
1,402
22.383333
73
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-relu-scalar-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_relu_ukernel__scalar_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; input_a += 4; const float vb0 = input_b[0]; const float vb1 = input_b[1]; const float vb2 = input_b[2]; const float vb3 = input_b[3]; input_b += 4; float vacc0 = va0 * vb0; float vacc1 = va1 * vb1; float vacc2 = va2 * vb2; float vacc3 = va3 * vb3; vacc0 = math_max_f32(vacc0, 0.0f); vacc1 = math_max_f32(vacc1, 0.0f); vacc2 = math_max_f32(vacc2, 0.0f); vacc3 = math_max_f32(vacc3, 0.0f); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output += 4; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; const float vb = *input_b++; float vacc = va * vb; vacc = math_max_f32(vacc, 0.0f); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
1,765
23.527778
73
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-relu-scalar-x8.c
// Auto-generated file. Do not edit!
//   Template: src/f32-vbinary/vop-scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>


// Scalar micro-kernel: output[i] = max(a[i] * b[i], 0) (multiply + ReLU),
// main loop unrolled 8x with a one-element-at-a-time tail loop.
// `batch` is a byte count (non-zero multiple of sizeof(float)).
void xnn_f32_vmul_relu_ukernel__scalar_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Main loop: eight elements per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    const float va4 = input_a[4];
    const float va5 = input_a[5];
    const float va6 = input_a[6];
    const float va7 = input_a[7];
    input_a += 8;

    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    const float vb2 = input_b[2];
    const float vb3 = input_b[3];
    const float vb4 = input_b[4];
    const float vb5 = input_b[5];
    const float vb6 = input_b[6];
    const float vb7 = input_b[7];
    input_b += 8;

    float vacc0 = va0 * vb0;
    float vacc1 = va1 * vb1;
    float vacc2 = va2 * vb2;
    float vacc3 = va3 * vb3;
    float vacc4 = va4 * vb4;
    float vacc5 = va5 * vb5;
    float vacc6 = va6 * vb6;
    float vacc7 = va7 * vb7;

    vacc0 = math_max_f32(vacc0, 0.0f);
    vacc1 = math_max_f32(vacc1, 0.0f);
    vacc2 = math_max_f32(vacc2, 0.0f);
    vacc3 = math_max_f32(vacc3, 0.0f);
    vacc4 = math_max_f32(vacc4, 0.0f);
    vacc5 = math_max_f32(vacc5, 0.0f);
    vacc6 = math_max_f32(vacc6, 0.0f);
    vacc7 = math_max_f32(vacc7, 0.0f);

    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output[4] = vacc4;
    output[5] = vacc5;
    output[6] = vacc6;
    output[7] = vacc7;
    output += 8;
  }
  // Tail: 1-7 remaining elements, handled scalar-by-scalar.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      const float vb = *input_b++;
      float vacc = va * vb;
      vacc = math_max_f32(vacc, 0.0f);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
2,401
25.108696
73
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-relu-wasm-x1.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_relu_ukernel__wasm_x1( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= sizeof(float); batch -= sizeof(float)) { const float va = *input_a++; const float vb = *input_b++; float vacc = va * vb; vacc = __builtin_wasm_max_f32(vacc, 0.0f); *output++ = vacc; } }
960
23.641026
73
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-relu-wasm-x2.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_relu_ukernel__wasm_x2( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; input_a += 2; const float vb0 = input_b[0]; const float vb1 = input_b[1]; input_b += 2; float vacc0 = va0 * vb0; float vacc1 = va1 * vb1; vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f); vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f); output[0] = vacc0; output[1] = vacc1; output += 2; } if XNN_UNLIKELY(batch != 0) { assert(batch == sizeof(float)); const float va = *input_a; const float vb = *input_b; float vacc = va * vb; vacc = __builtin_wasm_max_f32(vacc, 0.0f); *output = vacc; } }
1,430
22.85
73
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-relu-wasm-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_relu_ukernel__wasm_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; input_a += 4; const float vb0 = input_b[0]; const float vb1 = input_b[1]; const float vb2 = input_b[2]; const float vb3 = input_b[3]; input_b += 4; float vacc0 = va0 * vb0; float vacc1 = va1 * vb1; float vacc2 = va2 * vb2; float vacc3 = va3 * vb3; vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f); vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f); vacc2 = __builtin_wasm_max_f32(vacc2, 0.0f); vacc3 = __builtin_wasm_max_f32(vacc3, 0.0f); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output += 4; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; const float vb = *input_b++; float vacc = va * vb; vacc = __builtin_wasm_max_f32(vacc, 0.0f); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
1,813
24.194444
73
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-relu-wasm-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_relu_ukernel__wasm_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; const float va4 = input_a[4]; const float va5 = input_a[5]; const float va6 = input_a[6]; const float va7 = input_a[7]; input_a += 8; const float vb0 = input_b[0]; const float vb1 = input_b[1]; const float vb2 = input_b[2]; const float vb3 = input_b[3]; const float vb4 = input_b[4]; const float vb5 = input_b[5]; const float vb6 = input_b[6]; const float vb7 = input_b[7]; input_b += 8; float vacc0 = va0 * vb0; float vacc1 = va1 * vb1; float vacc2 = va2 * vb2; float vacc3 = va3 * vb3; float vacc4 = va4 * vb4; float vacc5 = va5 * vb5; float vacc6 = va6 * vb6; float vacc7 = va7 * vb7; vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f); vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f); vacc2 = __builtin_wasm_max_f32(vacc2, 0.0f); vacc3 = __builtin_wasm_max_f32(vacc3, 0.0f); vacc4 = __builtin_wasm_max_f32(vacc4, 0.0f); vacc5 = __builtin_wasm_max_f32(vacc5, 0.0f); vacc6 = __builtin_wasm_max_f32(vacc6, 0.0f); vacc7 = __builtin_wasm_max_f32(vacc7, 0.0f); output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output[4] = vacc4; output[5] = vacc5; output[6] = vacc6; output[7] = vacc7; output += 8; } if 
XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; const float vb = *input_b++; float vacc = va * vb; vacc = __builtin_wasm_max_f32(vacc, 0.0f); *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
2,489
26.065217
73
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-relu-wasmsimd-x16.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_relu_ukernel__wasmsimd_x16( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vzero = wasm_i32x4_const_splat(0); for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) { const v128_t va0 = wasm_v128_load(input_a); const v128_t va1 = wasm_v128_load(input_a + 4); const v128_t va2 = wasm_v128_load(input_a + 8); const v128_t va3 = wasm_v128_load(input_a + 12); input_a += 16; const v128_t vb0 = wasm_v128_load(input_b); const v128_t vb1 = wasm_v128_load(input_b + 4); const v128_t vb2 = wasm_v128_load(input_b + 8); const v128_t vb3 = wasm_v128_load(input_b + 12); input_b += 16; v128_t vacc0 = wasm_f32x4_mul(va0, vb0); v128_t vacc1 = wasm_f32x4_mul(va1, vb1); v128_t vacc2 = wasm_f32x4_mul(va2, vb2); v128_t vacc3 = wasm_f32x4_mul(va3, vb3); vacc0 = wasm_i32x4_max(vacc0, vzero); vacc1 = wasm_i32x4_max(vacc1, vzero); vacc2 = wasm_i32x4_max(vacc2, vzero); vacc3 = wasm_i32x4_max(vacc3, vzero); wasm_v128_store(output, vacc0); wasm_v128_store(output + 4, vacc1); wasm_v128_store(output + 8, vacc2); wasm_v128_store(output + 12, vacc3); output += 16; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; const v128_t vb = wasm_v128_load(input_b); input_b += 4; v128_t vacc = wasm_f32x4_mul(va, vb); vacc = wasm_i32x4_max(vacc, vzero); wasm_v128_store(output, vacc); 
output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); const v128_t vb = wasm_v128_load(input_b); v128_t vacc = wasm_f32x4_mul(va, vb); vacc = wasm_i32x4_max(vacc, vzero); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vacc, 0); vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vacc, 0); } } }
2,665
27.063158
87
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-relu-wasmsimd-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_relu_ukernel__wasmsimd_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vzero = wasm_i32x4_const_splat(0); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; const v128_t vb = wasm_v128_load(input_b); input_b += 4; v128_t vacc = wasm_f32x4_mul(va, vb); vacc = wasm_i32x4_max(vacc, vzero); wasm_v128_store(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); const v128_t vb = wasm_v128_load(input_b); v128_t vacc = wasm_f32x4_mul(va, vb); vacc = wasm_i32x4_max(vacc, vzero); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vacc, 0); vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vacc, 0); } } }
1,615
23.861538
87
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-relu-wasmsimd-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_relu_ukernel__wasmsimd_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); const v128_t vzero = wasm_i32x4_const_splat(0); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const v128_t va0 = wasm_v128_load(input_a); const v128_t va1 = wasm_v128_load(input_a + 4); input_a += 8; const v128_t vb0 = wasm_v128_load(input_b); const v128_t vb1 = wasm_v128_load(input_b + 4); input_b += 8; v128_t vacc0 = wasm_f32x4_mul(va0, vb0); v128_t vacc1 = wasm_f32x4_mul(va1, vb1); vacc0 = wasm_i32x4_max(vacc0, vzero); vacc1 = wasm_i32x4_max(vacc1, vzero); wasm_v128_store(output, vacc0); wasm_v128_store(output + 4, vacc1); output += 8; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; const v128_t vb = wasm_v128_load(input_b); input_b += 4; v128_t vacc = wasm_f32x4_mul(va, vb); vacc = wasm_i32x4_max(vacc, vzero); wasm_v128_store(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); const v128_t vb = wasm_v128_load(input_b); v128_t vacc = wasm_f32x4_mul(va, vb); vacc = wasm_i32x4_max(vacc, vzero); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vacc, 0); vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vacc, 0); } } }
2,194
24.823529
87
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-scalar-x1.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_ukernel__scalar_x1( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= sizeof(float); batch -= sizeof(float)) { const float va = *input_a++; const float vb = *input_b++; float vacc = va * vb; *output++ = vacc; } }
913
23.052632
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-scalar-x2.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_ukernel__scalar_x2( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; input_a += 2; const float vb0 = input_b[0]; const float vb1 = input_b[1]; input_b += 2; float vacc0 = va0 * vb0; float vacc1 = va1 * vb1; output[0] = vacc0; output[1] = vacc1; output += 2; } if XNN_UNLIKELY(batch != 0) { assert(batch == sizeof(float)); const float va = *input_a; const float vb = *input_b; float vacc = va * vb; *output = vacc; } }
1,285
21.561404
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-scalar-x4.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_ukernel__scalar_x4( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; input_a += 4; const float vb0 = input_b[0]; const float vb1 = input_b[1]; const float vb2 = input_b[2]; const float vb3 = input_b[3]; input_b += 4; float vacc0 = va0 * vb0; float vacc1 = va1 * vb1; float vacc2 = va2 * vb2; float vacc3 = va3 * vb3; output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output += 4; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; const float vb = *input_b++; float vacc = va * vb; *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
1,568
22.41791
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-scalar-x8.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-scalar.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_ukernel__scalar_x8( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) { const float va0 = input_a[0]; const float va1 = input_a[1]; const float va2 = input_a[2]; const float va3 = input_a[3]; const float va4 = input_a[4]; const float va5 = input_a[5]; const float va6 = input_a[6]; const float va7 = input_a[7]; input_a += 8; const float vb0 = input_b[0]; const float vb1 = input_b[1]; const float vb2 = input_b[2]; const float vb3 = input_b[3]; const float vb4 = input_b[4]; const float vb5 = input_b[5]; const float vb6 = input_b[6]; const float vb7 = input_b[7]; input_b += 8; float vacc0 = va0 * vb0; float vacc1 = va1 * vb1; float vacc2 = va2 * vb2; float vacc3 = va3 * vb3; float vacc4 = va4 * vb4; float vacc5 = va5 * vb5; float vacc6 = va6 * vb6; float vacc7 = va7 * vb7; output[0] = vacc0; output[1] = vacc1; output[2] = vacc2; output[3] = vacc3; output[4] = vacc4; output[5] = vacc5; output[6] = vacc6; output[7] = vacc7; output += 8; } if XNN_UNLIKELY(batch != 0) { do { const float va = *input_a++; const float vb = *input_b++; float vacc = va * vb; *output++ = vacc; batch -= sizeof(float); } while (batch != 0); } }
2,048
23.686747
76
c
XNNPACK
XNNPACK-master/src/f32-vbinary/gen/f32-vmul-wasmsimd-x16.c
// Auto-generated file. Do not edit! // Template: src/f32-vbinary/vop-wasmsimd.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <wasm_simd128.h> #include <xnnpack/common.h> #include <xnnpack/vbinary.h> void xnn_f32_vmul_ukernel__wasmsimd_x16( size_t batch, const float* input_a, const float* input_b, float* output, const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(float) == 0); assert(input_a != NULL); assert(input_b != NULL); assert(output != NULL); for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) { const v128_t va0 = wasm_v128_load(input_a); const v128_t va1 = wasm_v128_load(input_a + 4); const v128_t va2 = wasm_v128_load(input_a + 8); const v128_t va3 = wasm_v128_load(input_a + 12); input_a += 16; const v128_t vb0 = wasm_v128_load(input_b); const v128_t vb1 = wasm_v128_load(input_b + 4); const v128_t vb2 = wasm_v128_load(input_b + 8); const v128_t vb3 = wasm_v128_load(input_b + 12); input_b += 16; v128_t vacc0 = wasm_f32x4_mul(va0, vb0); v128_t vacc1 = wasm_f32x4_mul(va1, vb1); v128_t vacc2 = wasm_f32x4_mul(va2, vb2); v128_t vacc3 = wasm_f32x4_mul(va3, vb3); wasm_v128_store(output, vacc0); wasm_v128_store(output + 4, vacc1); wasm_v128_store(output + 8, vacc2); wasm_v128_store(output + 12, vacc3); output += 16; } for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) { const v128_t va = wasm_v128_load(input_a); input_a += 4; const v128_t vb = wasm_v128_load(input_b); input_b += 4; v128_t vacc = wasm_f32x4_mul(va, vb); wasm_v128_store(output, vacc); output += 4; } if XNN_UNLIKELY(batch != 0) { const v128_t va = wasm_v128_load(input_a); const v128_t vb = wasm_v128_load(input_b); v128_t vacc = wasm_f32x4_mul(va, vb); if (batch & (2 * sizeof(float))) { wasm_v128_store64_lane(output, vacc, 
0); vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1); output += 2; } if (batch & (1 * sizeof(float))) { wasm_v128_store32_lane(output, vacc, 0); } } }
2,365
25.886364
90
c